text (string, 7 to 3.71M chars) | id (string, 12 to 166 chars) | metadata (dict) | __index_level_0__ (int64, 0 to 658) |
---|---|---|---|
import init, { Model } from "./build/m.js";
async function fetchArrayBuffer(url, cacheFile = true) {
if (!cacheFile) return new Uint8Array(await (await fetch(url)).arrayBuffer());
const cacheName = "blip-candle-cache";
const cache = await caches.open(cacheName);
const cachedResponse = await cache.match(url);
if (cachedResponse) {
const data = await cachedResponse.arrayBuffer();
return new Uint8Array(data);
}
const res = await fetch(url, { cache: "force-cache" });
cache.put(url, res.clone());
return new Uint8Array(await res.arrayBuffer());
}
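// Caches one Model instance per modelID so the weights, tokenizer and config are only loaded once.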
class Blip {
static instance = {};
static async getInstance(
weightsURL,
tokenizerURL,
configURL,
modelID,
quantized
) {
if (!this.instance[modelID]) {
await init();
self.postMessage({ status: "loading", message: "Loading Model" });
const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
await Promise.all([
fetchArrayBuffer(weightsURL),
fetchArrayBuffer(tokenizerURL),
fetchArrayBuffer(configURL),
]);
this.instance[modelID] = new Model(
weightsArrayU8,
tokenizerArrayU8,
configArrayU8,
quantized
);
} else {
self.postMessage({ status: "ready", message: "Model Already Loaded" });
}
return this.instance[modelID];
}
}
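// Worker entry point: expects a message with model/tokenizer/config URLs, a modelID, an image URL and a
// quantized flag, and posts "status", "complete" or error messages back to the main thread.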
self.addEventListener("message", async (event) => {
const { weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized } =
event.data;
try {
self.postMessage({ status: "status", message: "Loading Blip Model..." });
const model = await Blip.getInstance(
weightsURL,
tokenizerURL,
configURL,
modelID,
quantized
);
self.postMessage({
status: "status",
message: "Running Blip Inference...",
});
const imageArrayU8 = await fetchArrayBuffer(imageURL, false);
const output = model.generate_caption_from_image(imageArrayU8);
self.postMessage({
status: "complete",
message: "complete",
output: output,
});
} catch (e) {
self.postMessage({ error: e });
}
});
| candle/candle-wasm-examples/blip/blipWorker.js/0 | {
"file_path": "candle/candle-wasm-examples/blip/blipWorker.js",
"repo_id": "candle",
"token_count": 815
} | 90 |
mod app;
pub mod model;
pub mod worker;
pub use app::App;
pub use worker::Worker;
| candle/candle-wasm-examples/llama2-c/src/lib.rs/0 | {
"file_path": "candle/candle-wasm-examples/llama2-c/src/lib.rs",
"repo_id": "candle",
"token_count": 29
} | 91 |
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use candle_transformers::models::mixformer::{Config, MixFormerSequentialForCausalLM as MixFormer};
use candle_transformers::models::quantized_mixformer::MixFormerSequentialForCausalLM as QMixFormer;
use candle_wasm_example_phi::console_log;
use js_sys::Date;
use serde::Deserialize;
use tokenizers::Tokenizer;
use wasm_bindgen::prelude::*;
enum SelectedModel {
MixFormer(MixFormer),
Quantized(QMixFormer),
}
#[wasm_bindgen]
pub struct Model {
model: SelectedModel,
tokenizer: Tokenizer,
logits_processor: LogitsProcessor,
tokens: Vec<u32>,
repeat_penalty: f32,
repeat_last_n: usize,
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct ModelName {
pub _name_or_path: String,
}
#[wasm_bindgen]
impl Model {
#[wasm_bindgen(constructor)]
pub fn load(
weights: Vec<u8>,
tokenizer: Vec<u8>,
config: Vec<u8>,
quantized: bool,
) -> Result<Model, JsError> {
console_error_panic_hook::set_once();
console_log!("loading model");
let device = Device::Cpu;
let name: ModelName = serde_json::from_slice(&config)?;
let config: Config = serde_json::from_slice(&config)?;
console_log!("config loaded {:?}", name);
let tokenizer =
Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?;
let start = Date::now();
console_log!("weights len: {:?}", weights.len());
let model = if quantized {
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf_buffer(
&weights, &device,
)?;
console_log!("weights loaded");
if name._name_or_path == "microsoft/phi-2" {
let model = QMixFormer::new_v2(&config, vb)?;
SelectedModel::Quantized(model)
} else {
let model = QMixFormer::new(&config, vb)?;
SelectedModel::Quantized(model)
}
} else {
let device = &Device::Cpu;
let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?;
let model = MixFormer::new(&config, vb)?;
SelectedModel::MixFormer(model)
};
console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.);
let logits_processor = LogitsProcessor::new(299792458, None, None);
Ok(Self {
model,
tokenizer,
tokens: vec![],
logits_processor,
repeat_penalty: 1.,
repeat_last_n: 64,
})
}
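    // Resets the KV cache and sampling state, encodes the prompt and returns the first generated token.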
#[wasm_bindgen]
pub fn init_with_prompt(
&mut self,
prompt: String,
temp: f64,
top_p: f64,
repeat_penalty: f32,
repeat_last_n: usize,
seed: u64,
) -> Result<String, JsError> {
match &mut self.model {
SelectedModel::MixFormer(m) => m.clear_kv_cache(),
SelectedModel::Quantized(m) => m.clear_kv_cache(),
};
let temp = if temp <= 0. { None } else { Some(temp) };
let top_p = if top_p <= 0. || top_p >= 1. {
None
} else {
Some(top_p)
};
self.logits_processor = LogitsProcessor::new(seed, temp, top_p);
self.repeat_penalty = repeat_penalty;
self.repeat_last_n = repeat_last_n;
self.tokens.clear();
let tokens = self
.tokenizer
.encode(prompt, true)
.map_err(|m| JsError::new(&m.to_string()))?
.get_ids()
.to_vec();
let text = self
.process(&tokens)
.map_err(|m| JsError::new(&m.to_string()))?;
Ok(text)
}
#[wasm_bindgen]
pub fn next_token(&mut self) -> Result<String, JsError> {
let last_token = *self.tokens.last().unwrap();
let text = self
.process(&[last_token])
.map_err(|m| JsError::new(&m.to_string()))?;
Ok(text)
}
}
impl Model {
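    // Runs a single forward pass over `tokens`, applies the repetition penalty, samples the next token
    // and returns its decoded text.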
fn process(&mut self, tokens: &[u32]) -> candle::Result<String> {
let dev = Device::Cpu;
let input = Tensor::new(tokens, &dev)?.unsqueeze(0)?;
let logits = match &mut self.model {
SelectedModel::MixFormer(m) => m.forward(&input)?,
SelectedModel::Quantized(m) => m.forward(&input)?,
};
let logits = logits.squeeze(0)?.to_dtype(DType::F32)?;
let logits = if self.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(self.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
self.tokens.push(next_token);
let token = match self.tokenizer.decode(&[next_token], false) {
Ok(token) => token,
Err(e) => {
console_log!("error decoding token: {:?}", e);
"".to_string()
}
};
// console_log!("token: {:?}: {:?}", token, next_token);
Ok(token)
}
}
fn main() {
console_error_panic_hook::set_once();
}
| candle/candle-wasm-examples/phi/src/bin/m.rs/0 | {
"file_path": "candle/candle-wasm-examples/phi/src/bin/m.rs",
"repo_id": "candle",
"token_count": 2646
} | 92 |
use crate::languages::LANGUAGES;
use anyhow::Error as E;
use candle::{safetensors::Load, DType, Device, IndexOp, Tensor, D};
use candle_nn::{ops::softmax, VarBuilder};
pub use candle_transformers::models::whisper::{self as m, Config};
use rand::{distributions::Distribution, rngs::StdRng, SeedableRng};
use serde::{Deserialize, Serialize};
use tokenizers::Tokenizer;
use wasm_bindgen::prelude::*;
use yew_agent::{HandlerId, Public, WorkerLink};
#[wasm_bindgen]
extern "C" {
// Use `js_namespace` here to bind `console.log(..)` instead of just
// `log(..)`
#[wasm_bindgen(js_namespace = console)]
pub fn log(s: &str);
}
#[macro_export]
macro_rules! console_log {
// Note that this is using the `log` function imported above during
// `bare_bones`
($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string()))
}
pub const DTYPE: DType = DType::F32;
pub enum Model {
Normal(m::model::Whisper),
Quantized(m::quantized_model::Whisper),
}
// Maybe we should use some traits rather than doing the dispatch for all these.
impl Model {
pub fn config(&self) -> &Config {
match self {
Self::Normal(m) => &m.config,
Self::Quantized(m) => &m.config,
}
}
pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.encoder.forward(x, flush),
Self::Quantized(m) => m.encoder.forward(x, flush),
}
}
pub fn decoder_forward(
&mut self,
x: &Tensor,
xa: &Tensor,
flush: bool,
) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.decoder.forward(x, xa, flush),
Self::Quantized(m) => m.decoder.forward(x, xa, flush),
}
}
pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.decoder.final_linear(x),
Self::Quantized(m) => m.decoder.final_linear(x),
}
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DecodingResult {
pub tokens: Vec<u32>,
pub text: String,
pub avg_logprob: f64,
pub no_speech_prob: f64,
temperature: f64,
compression_ratio: f64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Segment {
pub start: f64,
pub duration: f64,
pub dr: DecodingResult,
}
pub struct Decoder {
model: Model,
rng: rand::rngs::StdRng,
task: Option<Task>,
language: Option<String>,
is_multilingual: bool,
mel_filters: Vec<f32>,
timestamps: bool,
tokenizer: Tokenizer,
suppress_tokens: Tensor,
sot_token: u32,
transcribe_token: u32,
translate_token: u32,
eot_token: u32,
no_speech_token: u32,
no_timestamps_token: u32,
}
impl Decoder {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
mel_filters: Vec<f32>,
device: &Device,
task: Option<Task>,
language: Option<String>,
is_multilingual: bool,
timestamps: bool,
) -> anyhow::Result<Self> {
let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32)
.map(|i| {
if model.config().suppress_tokens.contains(&i) {
f32::NEG_INFINITY
} else {
0f32
}
})
.collect();
let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?;
let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?;
let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?;
let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?;
let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?;
let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?;
let no_speech_token = m::NO_SPEECH_TOKENS
.iter()
.find_map(|token| token_id(&tokenizer, token).ok());
let no_speech_token = match no_speech_token {
None => anyhow::bail!("unable to find any non-speech token"),
Some(n) => n,
};
let seed = 299792458;
Ok(Self {
model,
rng: StdRng::seed_from_u64(seed),
tokenizer,
mel_filters,
task,
timestamps,
language,
is_multilingual,
suppress_tokens,
sot_token,
transcribe_token,
translate_token,
eot_token,
no_speech_token,
no_timestamps_token,
})
}
fn decode(&mut self, mel: &Tensor, t: f64) -> anyhow::Result<DecodingResult> {
let model = &mut self.model;
let language_token = match (self.is_multilingual, &self.language) {
(true, None) => Some(detect_language(model, &self.tokenizer, mel)?),
(false, None) => None,
(true, Some(language)) => {
match token_id(&self.tokenizer, &format!("<|{language}|>")) {
Ok(token_id) => Some(token_id),
Err(_) => anyhow::bail!("language {language} is not supported"),
}
}
(false, Some(_)) => {
anyhow::bail!("a language cannot be set for non-multilingual models")
}
};
let audio_features = model.encoder_forward(mel, true)?;
println!("audio features: {:?}", audio_features.dims());
let sample_len = model.config().max_target_positions / 2;
let mut sum_logprob = 0f64;
let mut no_speech_prob = f64::NAN;
let mut tokens = vec![self.sot_token];
if let Some(language_token) = language_token {
tokens.push(language_token);
}
match self.task {
None | Some(Task::Transcribe) => tokens.push(self.transcribe_token),
Some(Task::Translate) => tokens.push(self.translate_token),
}
if !self.timestamps {
tokens.push(self.no_timestamps_token);
}
for i in 0..sample_len {
let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?;
// The model expects a batch dim but this inference loop does not handle
// it so we add it at this point.
let tokens_t = tokens_t.unsqueeze(0)?;
let ys = model.decoder_forward(&tokens_t, &audio_features, i == 0)?;
// Extract the no speech probability on the first iteration by looking at the first
// token logits and the probability for the corresponding token.
if i == 0 {
let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
no_speech_prob = softmax(&logits, 0)?
.i(self.no_speech_token as usize)?
.to_scalar::<f32>()? as f64;
}
let (_, seq_len, _) = ys.dims3()?;
let logits = model
.decoder_final_linear(&ys.i((..1, seq_len - 1..))?)?
.i(0)?
.i(0)?;
// TODO: Besides suppress tokens, we should apply the heuristics from
// ApplyTimestampRules, i.e.:
// - Timestamps come in pairs, except before EOT.
// - Timestamps should be non-decreasing.
// - If the sum of the probabilities of timestamps is higher than any other tokens,
// only consider timestamps when sampling.
// https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L439
let logits = logits.broadcast_add(&self.suppress_tokens)?;
let next_token = if t > 0f64 {
let prs = softmax(&(&logits / t)?, 0)?;
let logits_v: Vec<f32> = prs.to_vec1()?;
let distr = rand::distributions::WeightedIndex::new(&logits_v)?;
distr.sample(&mut self.rng) as u32
} else {
let logits_v: Vec<f32> = logits.to_vec1()?;
logits_v
.iter()
.enumerate()
.max_by(|(_, u), (_, v)| u.total_cmp(v))
.map(|(i, _)| i as u32)
.unwrap()
};
tokens.push(next_token);
let prob = softmax(&logits, candle::D::Minus1)?
.i(next_token as usize)?
.to_scalar::<f32>()? as f64;
if next_token == self.eot_token || tokens.len() > model.config().max_target_positions {
break;
}
sum_logprob += prob.ln();
}
let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?;
let avg_logprob = sum_logprob / tokens.len() as f64;
Ok(DecodingResult {
tokens,
text,
avg_logprob,
no_speech_prob,
temperature: t,
compression_ratio: f64::NAN,
})
}
fn decode_with_fallback(&mut self, segment: &Tensor) -> anyhow::Result<DecodingResult> {
for (i, &t) in m::TEMPERATURES.iter().enumerate() {
let dr: Result<DecodingResult, _> = self.decode(segment, t);
if i == m::TEMPERATURES.len() - 1 {
return dr;
}
// On errors, we try again with a different temperature.
match dr {
Ok(dr) => {
let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD
|| dr.avg_logprob < m::LOGPROB_THRESHOLD;
if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD {
return Ok(dr);
}
}
Err(err) => {
console_log!("Error running at {t}: {err}")
}
}
}
unreachable!()
}
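    // Splits the mel spectrogram into fixed-size segments and decodes each one, skipping segments
    // where no speech is detected.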
fn run(&mut self, mel: &Tensor) -> anyhow::Result<Vec<Segment>> {
let (_, _, content_frames) = mel.dims3()?;
let mut seek = 0;
let mut segments = vec![];
while seek < content_frames {
let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
let segment_size = usize::min(content_frames - seek, m::N_FRAMES);
let mel_segment = mel.narrow(2, seek, segment_size)?;
let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
let dr = self.decode_with_fallback(&mel_segment)?;
seek += segment_size;
if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD {
console_log!("no speech detected, skipping {seek} {dr:?}");
continue;
}
let segment = Segment {
start: time_offset,
duration: segment_duration,
dr,
};
console_log!("{seek}: {segment:?}");
segments.push(segment)
}
Ok(segments)
}
pub fn load(md: ModelData) -> anyhow::Result<Self> {
let device = Device::Cpu;
let tokenizer = Tokenizer::from_bytes(&md.tokenizer).map_err(E::msg)?;
let mel_filters = safetensors::tensor::SafeTensors::deserialize(&md.mel_filters)?;
let mel_filters = mel_filters.tensor("mel_80")?.load(&device)?;
console_log!("loaded mel filters {:?}", mel_filters.shape());
let mel_filters = mel_filters.flatten_all()?.to_vec1::<f32>()?;
let config: Config = serde_json::from_slice(&md.config)?;
let model = if md.quantized {
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf_buffer(
&md.weights,
&device,
)?;
Model::Quantized(m::quantized_model::Whisper::load(&vb, config)?)
} else {
let vb = VarBuilder::from_buffered_safetensors(md.weights, m::DTYPE, &device)?;
Model::Normal(m::model::Whisper::load(&vb, config)?)
};
console_log!("done loading model");
let task = match md.task.as_deref() {
Some("translate") => Some(Task::Translate),
_ => Some(Task::Transcribe),
};
let decoder = Self::new(
model,
tokenizer,
mel_filters,
&device,
task,
md.language,
md.is_multilingual,
md.timestamps,
)?;
Ok(decoder)
}
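    // Decodes a 16-bit WAV buffer into PCM samples, converts it to a mel spectrogram and runs the
    // transcription loop over it.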
pub fn convert_and_run(&mut self, wav_input: &[u8]) -> anyhow::Result<Vec<Segment>> {
let device = Device::Cpu;
let mut wav_input = std::io::Cursor::new(wav_input);
let (header, data) = wav::read(&mut wav_input)?;
console_log!("loaded wav data: {header:?}");
if header.sampling_rate != m::SAMPLE_RATE as u32 {
anyhow::bail!("wav file must have a {} sampling rate", m::SAMPLE_RATE);
}
let data = data.as_sixteen().expect("expected 16 bit wav file");
let pcm_data: Vec<_> = data[..data.len() / header.channel_count as usize]
.iter()
.map(|v| *v as f32 / 32768.)
.collect();
console_log!("pcm data loaded {}", pcm_data.len());
let mel = crate::audio::pcm_to_mel(self.model.config(), &pcm_data, &self.mel_filters)?;
let mel_len = mel.len();
let n_mels = self.model.config().num_mel_bins;
let mel = Tensor::from_vec(mel, (1, n_mels, mel_len / n_mels), &device)?;
console_log!("loaded mel: {:?}", mel.dims());
let segments = self.run(&mel)?;
Ok(segments)
}
}
/// Returns the token id for the selected language.
pub fn detect_language(model: &mut Model, tokenizer: &Tokenizer, mel: &Tensor) -> Result<u32, E> {
console_log!("detecting language");
let (_bsize, _, seq_len) = mel.dims3()?;
let mel = mel.narrow(
2,
0,
usize::min(seq_len, model.config().max_source_positions),
)?;
let device = mel.device();
let language_token_ids = LANGUAGES
.iter()
.map(|(t, _)| token_id(tokenizer, &format!("<|{t}|>")))
.map(|e| e.map_err(E::msg))
.collect::<Result<Vec<_>, E>>()?;
let sot_token = token_id(tokenizer, m::SOT_TOKEN)?;
let audio_features = model.encoder_forward(&mel, true)?;
let tokens = Tensor::new(&[[sot_token]], device)?;
let language_token_ids = Tensor::new(language_token_ids.as_slice(), device)?;
let ys = model.decoder_forward(&tokens, &audio_features, true)?;
let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
let logits = logits.index_select(&language_token_ids, 0)?;
let probs = candle_nn::ops::softmax(&logits, D::Minus1)?;
let probs = probs.to_vec1::<f32>()?;
let mut probs = LANGUAGES.iter().zip(probs.iter()).collect::<Vec<_>>();
probs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for ((_, language), p) in probs.iter().take(5) {
println!("{language}: {p}")
}
let token = &format!("<|{}|>", probs[0].0 .0);
let language = token_id(tokenizer, token)?;
console_log!("detected language: {language} {token}");
Ok(language)
}
pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> {
match tokenizer.token_to_id(token) {
None => candle::bail!("no token-id for {token}"),
Some(id) => Ok(id),
}
}
#[derive(Serialize, Deserialize, Clone, Copy, Debug)]
pub enum Task {
Transcribe,
Translate,
}
// Communication to the worker happens through bincode, the model weights and configs are fetched
// on the main thread and transferred via the following structure.
#[derive(Serialize, Deserialize)]
pub struct ModelData {
pub weights: Vec<u8>,
pub tokenizer: Vec<u8>,
pub mel_filters: Vec<u8>,
pub config: Vec<u8>,
pub quantized: bool,
pub timestamps: bool,
pub is_multilingual: bool,
pub language: Option<String>,
pub task: Option<String>,
}
pub struct Worker {
link: WorkerLink<Self>,
decoder: Option<Decoder>,
}
#[derive(Serialize, Deserialize)]
pub enum WorkerInput {
ModelData(ModelData),
DecodeTask { wav_bytes: Vec<u8> },
}
#[derive(Serialize, Deserialize)]
pub enum WorkerOutput {
Decoded(Vec<Segment>),
WeightsLoaded,
}
impl yew_agent::Worker for Worker {
type Input = WorkerInput;
type Message = ();
type Output = Result<WorkerOutput, String>;
type Reach = Public<Self>;
fn create(link: WorkerLink<Self>) -> Self {
Self {
link,
decoder: None,
}
}
fn update(&mut self, _msg: Self::Message) {
// no messaging
}
fn handle_input(&mut self, msg: Self::Input, id: HandlerId) {
let output = match msg {
WorkerInput::ModelData(md) => match Decoder::load(md) {
Ok(decoder) => {
self.decoder = Some(decoder);
Ok(WorkerOutput::WeightsLoaded)
}
Err(err) => Err(format!("model creation error {err:?}")),
},
WorkerInput::DecodeTask { wav_bytes } => match &mut self.decoder {
None => Err("model has not been set".to_string()),
Some(decoder) => decoder
.convert_and_run(&wav_bytes)
.map(WorkerOutput::Decoded)
.map_err(|e| e.to_string()),
},
};
self.link.respond(id, output);
}
fn name_of_resource() -> &'static str {
"worker.js"
}
fn resource_path_is_relative() -> bool {
true
}
}
| candle/candle-wasm-examples/whisper/src/worker.rs/0 | {
"file_path": "candle/candle-wasm-examples/whisper/src/worker.rs",
"repo_id": "candle",
"token_count": 8765
} | 93 |
[package]
name = "candle-wasm-tests"
version.workspace = true
edition.workspace = true
description = "WASM tests for candle"
keywords.workspace = true
categories.workspace = true
[dependencies]
candle = { workspace = true }
rand = { workspace = true }
getrandom = { version = "0.2", features = ["js"] }
[dev-dependencies]
wasm-bindgen-test = "0.3.0"
| candle/candle-wasm-tests/Cargo.toml/0 | {
"file_path": "candle/candle-wasm-tests/Cargo.toml",
"repo_id": "candle",
"token_count": 122
} | 94 |
# syntax=docker/dockerfile:1
# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
# you will also find guides on how best to write your Dockerfile
ARG INCLUDE_DB=false
# stage that install the dependencies
FROM node:20 as builder-production
WORKDIR /app
COPY --link --chown=1000 package-lock.json package.json ./
RUN --mount=type=cache,target=/app/.npm \
npm set cache /app/.npm && \
npm ci --omit=dev
FROM builder-production as builder
ARG APP_BASE=
ARG PUBLIC_APP_COLOR=blue
RUN --mount=type=cache,target=/app/.npm \
npm set cache /app/.npm && \
npm ci
COPY --link --chown=1000 . .
RUN npm run build
# mongo image
FROM mongo:latest as mongo
# image to be used if INCLUDE_DB is false
FROM node:20-slim as local_db_false
# image to be used if INCLUDE_DB is true
FROM node:20-slim as local_db_true
RUN apt-get update
RUN apt-get install gnupg curl -y
# copy mongo from the other stage
COPY --from=mongo /usr/bin/mongo* /usr/bin/
ENV MONGODB_URL=mongodb://localhost:27017
RUN mkdir -p /data/db
RUN chown -R 1000:1000 /data/db
# final image
FROM local_db_${INCLUDE_DB} as final
# build arg to determine if the database should be included
ARG INCLUDE_DB=false
ENV INCLUDE_DB=${INCLUDE_DB}
# svelte requires APP_BASE at build time so it must be passed as a build arg
ARG APP_BASE=
# tailwind requires the primary theme to be known at build time so it must be passed as a build arg
ARG PUBLIC_APP_COLOR=blue
# install dotenv-cli
RUN npm install -g dotenv-cli
# switch to a user that works for spaces
RUN userdel -r node
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
PATH=/home/user/.local/bin:$PATH
WORKDIR /app
# add a .env.local if the user doesn't bind a volume to it
RUN touch /app/.env.local
# get the default config, the entrypoint script and the server script
COPY --chown=1000 package.json /app/package.json
COPY --chown=1000 .env /app/.env
COPY --chown=1000 entrypoint.sh /app/entrypoint.sh
COPY --chown=1000 gcp-*.json /app/
#import the build & dependencies
COPY --from=builder --chown=1000 /app/build /app/build
COPY --from=builder --chown=1000 /app/node_modules /app/node_modules
RUN npx playwright install
USER root
RUN npx playwright install-deps
USER user
RUN chmod +x /app/entrypoint.sh
CMD ["/bin/bash", "-c", "/app/entrypoint.sh"]
| chat-ui/Dockerfile/0 | {
"file_path": "chat-ui/Dockerfile",
"repo_id": "chat-ui",
"token_count": 870
} | 95 |
ENV_LOCAL_PATH=/app/.env.local
if test -z "${DOTENV_LOCAL}" ; then
if ! test -f "${ENV_LOCAL_PATH}" ; then
echo "DOTENV_LOCAL was not found in the ENV variables and .env.local is not set using a bind volume. Make sure to set environment variables properly. "
fi;
else
echo "DOTENV_LOCAL was found in the ENV variables. Creating .env.local file."
cat <<< "$DOTENV_LOCAL" > ${ENV_LOCAL_PATH}
fi;
if [ "$INCLUDE_DB" = "true" ] ; then
echo "Starting local MongoDB instance"
nohup mongod &
fi;
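# Expose the package version to the app, then start the server with environment variables loaded via dotenv-cli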
export PUBLIC_VERSION=$(node -p "require('./package.json').version")
dotenv -e /app/.env -c -- node /app/build/index.js -- --host 0.0.0.0 --port 3000 | chat-ui/entrypoint.sh/0 | {
"file_path": "chat-ui/entrypoint.sh",
"repo_id": "chat-ui",
"token_count": 266
} | 96 |
<script lang="ts">
import { afterUpdate } from "svelte";
import CopyToClipBoardBtn from "./CopyToClipBoardBtn.svelte";
export let code = "";
export let lang = "";
$: highlightedCode = "";
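	// Highlight the code after every update, importing highlight.js lazily so it is only loaded in the browser.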
afterUpdate(async () => {
const { default: hljs } = await import("highlight.js");
const language = hljs.getLanguage(lang);
highlightedCode = hljs.highlightAuto(code, language?.aliases).value;
});
</script>
<div class="group relative my-4 rounded-lg">
<!-- eslint-disable svelte/no-at-html-tags -->
<pre
class="scrollbar-custom overflow-auto px-5 scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20"><code
class="language-{lang}">{@html highlightedCode || code.replaceAll("<", "<")}</code
></pre>
<CopyToClipBoardBtn
classNames="absolute top-2 right-2 invisible opacity-0 group-hover:visible group-hover:opacity-100"
value={code}
/>
</div>
| chat-ui/src/lib/components/CodeBlock.svelte/0 | {
"file_path": "chat-ui/src/lib/components/CodeBlock.svelte",
"repo_id": "chat-ui",
"token_count": 345
} | 97 |
<script lang="ts">
import CarbonRotate360 from "~icons/carbon/rotate-360";
export let classNames = "";
</script>
<button
type="button"
on:click
class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}"
>
<CarbonRotate360 class="mr-2 text-xs " /> Retry
</button>
| chat-ui/src/lib/components/RetryBtn.svelte/0 | {
"file_path": "chat-ui/src/lib/components/RetryBtn.svelte",
"repo_id": "chat-ui",
"token_count": 157
} | 98 |
<script lang="ts">
export let classNames = "";
</script>
<svg
width="1em"
height="1em"
viewBox="0 0 15 6"
class={classNames}
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M1.67236 1L7.67236 7L13.6724 1"
stroke="currentColor"
stroke-width="2"
stroke-linecap="round"
stroke-linejoin="round"
/>
</svg>
| chat-ui/src/lib/components/icons/IconChevron.svelte/0 | {
"file_path": "chat-ui/src/lib/components/icons/IconChevron.svelte",
"repo_id": "chat-ui",
"token_count": 156
} | 99 |
import { Issuer, BaseClient, type UserinfoResponse, TokenSet, custom } from "openid-client";
import { addHours, addWeeks } from "date-fns";
import { env } from "$env/dynamic/private";
import { sha256 } from "$lib/utils/sha256";
import { z } from "zod";
import { dev } from "$app/environment";
import type { Cookies } from "@sveltejs/kit";
import { collections } from "$lib/server/database";
import JSON5 from "json5";
import { logger } from "$lib/server/logger";
export interface OIDCSettings {
redirectURI: string;
}
export interface OIDCUserInfo {
token: TokenSet;
userData: UserinfoResponse;
}
const stringWithDefault = (value: string) =>
z
.string()
.default(value)
.transform((el) => (el ? el : value));
export const OIDConfig = z
.object({
CLIENT_ID: stringWithDefault(env.OPENID_CLIENT_ID),
CLIENT_SECRET: stringWithDefault(env.OPENID_CLIENT_SECRET),
PROVIDER_URL: stringWithDefault(env.OPENID_PROVIDER_URL),
SCOPES: stringWithDefault(env.OPENID_SCOPES),
NAME_CLAIM: stringWithDefault(env.OPENID_NAME_CLAIM).refine(
(el) => !["preferred_username", "email", "picture", "sub"].includes(el),
{ message: "nameClaim cannot be one of the restricted keys." }
),
TOLERANCE: stringWithDefault(env.OPENID_TOLERANCE),
RESOURCE: stringWithDefault(env.OPENID_RESOURCE),
})
.parse(JSON5.parse(env.OPENID_CONFIG));
export const requiresUser = !!OIDConfig.CLIENT_ID && !!OIDConfig.CLIENT_SECRET;
export function refreshSessionCookie(cookies: Cookies, sessionId: string) {
cookies.set(env.COOKIE_NAME, sessionId, {
path: "/",
// So that it works inside the space's iframe
sameSite: dev || env.ALLOW_INSECURE_COOKIES === "true" ? "lax" : "none",
secure: !dev && !(env.ALLOW_INSECURE_COOKIES === "true"),
httpOnly: true,
expires: addWeeks(new Date(), 2),
});
}
export async function findUser(sessionId: string) {
const session = await collections.sessions.findOne({ sessionId });
if (!session) {
return null;
}
return await collections.users.findOne({ _id: session.userId });
}
export const authCondition = (locals: App.Locals) => {
return locals.user
? { userId: locals.user._id }
: { sessionId: locals.sessionId, userId: { $exists: false } };
};
/**
* Generates a CSRF token using the user sessionId. Note that we don't need a secret because sessionId is enough.
*/
export async function generateCsrfToken(sessionId: string, redirectUrl: string): Promise<string> {
const data = {
expiration: addHours(new Date(), 1).getTime(),
redirectUrl,
};
return Buffer.from(
JSON.stringify({
data,
signature: await sha256(JSON.stringify(data) + "##" + sessionId),
})
).toString("base64");
}
async function getOIDCClient(settings: OIDCSettings): Promise<BaseClient> {
const issuer = await Issuer.discover(OIDConfig.PROVIDER_URL);
return new issuer.Client({
client_id: OIDConfig.CLIENT_ID,
client_secret: OIDConfig.CLIENT_SECRET,
redirect_uris: [settings.redirectURI],
response_types: ["code"],
[custom.clock_tolerance]: OIDConfig.TOLERANCE || undefined,
});
}
export async function getOIDCAuthorizationUrl(
settings: OIDCSettings,
params: { sessionId: string }
): Promise<string> {
const client = await getOIDCClient(settings);
const csrfToken = await generateCsrfToken(params.sessionId, settings.redirectURI);
return client.authorizationUrl({
scope: OIDConfig.SCOPES,
state: csrfToken,
resource: OIDConfig.RESOURCE || undefined,
});
}
export async function getOIDCUserData(settings: OIDCSettings, code: string): Promise<OIDCUserInfo> {
const client = await getOIDCClient(settings);
const token = await client.callback(settings.redirectURI, { code });
const userData = await client.userinfo(token);
return { token, userData };
}
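/**
 * Verifies the CSRF token signature against the sessionId and returns the embedded redirect URL
 * if the token has not expired, or null otherwise.
 */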
export async function validateAndParseCsrfToken(
token: string,
sessionId: string
): Promise<{
/** This is the redirect url that was passed to the OIDC provider */
redirectUrl: string;
} | null> {
try {
const { data, signature } = z
.object({
data: z.object({
expiration: z.number().int(),
redirectUrl: z.string().url(),
}),
signature: z.string().length(64),
})
.parse(JSON.parse(token));
const reconstructSign = await sha256(JSON.stringify(data) + "##" + sessionId);
if (data.expiration > Date.now() && signature === reconstructSign) {
return { redirectUrl: data.redirectUrl };
}
} catch (e) {
logger.error(e);
}
return null;
}
| chat-ui/src/lib/server/auth.ts/0 | {
"file_path": "chat-ui/src/lib/server/auth.ts",
"repo_id": "chat-ui",
"token_count": 1566
} | 100 |
import { env } from "$env/dynamic/private";
import { buildPrompt } from "$lib/buildPrompt";
import type { TextGenerationStreamOutput } from "@huggingface/inference";
import type { Endpoint } from "../endpoints";
import { z } from "zod";
import { logger } from "$lib/server/logger";
export const endpointLlamacppParametersSchema = z.object({
weight: z.number().int().positive().default(1),
model: z.any(),
type: z.literal("llamacpp"),
url: z.string().url().default("http://127.0.0.1:8080"),
accessToken: z
.string()
.min(1)
.default(env.HF_TOKEN ?? env.HF_ACCESS_TOKEN),
});
export function endpointLlamacpp(
input: z.input<typeof endpointLlamacppParametersSchema>
): Endpoint {
const { url, model } = endpointLlamacppParametersSchema.parse(input);
return async ({ messages, preprompt, continueMessage, generateSettings }) => {
const prompt = await buildPrompt({
messages,
continueMessage,
preprompt,
model,
});
const parameters = { ...model.parameters, ...generateSettings };
const r = await fetch(`${url}/completion`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
prompt,
stream: true,
temperature: parameters.temperature,
top_p: parameters.top_p,
top_k: parameters.top_k,
stop: parameters.stop,
repeat_penalty: parameters.repetition_penalty,
n_predict: parameters.max_new_tokens,
cache_prompt: true,
}),
});
if (!r.ok) {
throw new Error(`Failed to generate text: ${await r.text()}`);
}
const encoder = new TextDecoderStream();
const reader = r.body?.pipeThrough(encoder).getReader();
return (async function* () {
let stop = false;
let generatedText = "";
let tokenId = 0;
let accumulatedData = ""; // Buffer to accumulate data chunks
while (!stop) {
// Read the stream and log the outputs to console
const out = (await reader?.read()) ?? { done: false, value: undefined };
// If it's done, we cancel
if (out.done) {
reader?.cancel();
return;
}
if (!out.value) {
return;
}
// Accumulate the data chunk
accumulatedData += out.value;
// Process each complete JSON object in the accumulated data
while (accumulatedData.includes("\n")) {
// Assuming each JSON object ends with a newline
const endIndex = accumulatedData.indexOf("\n");
let jsonString = accumulatedData.substring(0, endIndex).trim();
// Remove the processed part from the buffer
accumulatedData = accumulatedData.substring(endIndex + 1);
if (jsonString.startsWith("data: ")) {
jsonString = jsonString.slice(6);
let data = null;
try {
data = JSON.parse(jsonString);
} catch (e) {
logger.error("Failed to parse JSON", e);
logger.error("Problematic JSON string:", jsonString);
continue; // Skip this iteration and try the next chunk
}
// Handle the parsed data
if (data.content || data.stop) {
generatedText += data.content;
const output: TextGenerationStreamOutput = {
token: {
id: tokenId++,
text: data.content ?? "",
logprob: 0,
special: false,
},
generated_text: data.stop ? generatedText : null,
details: null,
};
if (data.stop) {
stop = true;
output.token.special = true;
reader?.cancel();
}
yield output;
}
}
}
}
})();
};
}
export default endpointLlamacpp;
| chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/llamacpp/endpointLlamacpp.ts",
"repo_id": "chat-ui",
"token_count": 1430
} | 101 |
import { env } from "$env/dynamic/private";
import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint";
import type { Message } from "$lib/types/Message";
import { logger } from "$lib/server/logger";
export async function summarize(prompt: string) {
if (!env.LLM_SUMMERIZATION) {
return prompt.split(/\s+/g).slice(0, 5).join(" ");
}
const messages: Array<Omit<Message, "id">> = [
{ from: "user", content: "Who is the president of Gabon?" },
{ from: "assistant", content: "🇬🇦 President of Gabon" },
{ from: "user", content: "Who is Julien Chaumond?" },
{ from: "assistant", content: "🧑 Julien Chaumond" },
{ from: "user", content: "what is 1 + 1?" },
{ from: "assistant", content: "🔢 Simple math operation" },
{ from: "user", content: "What are the latest news?" },
{ from: "assistant", content: "📰 Latest news" },
{ from: "user", content: "How to make a great cheesecake?" },
{ from: "assistant", content: "🍰 Cheesecake recipe" },
{ from: "user", content: "what is your favorite movie? do a short answer." },
{ from: "assistant", content: "🎥 Favorite movie" },
{ from: "user", content: "Explain the concept of artificial intelligence in one sentence" },
{ from: "assistant", content: "🤖 AI definition" },
{ from: "user", content: prompt },
];
return await generateFromDefaultEndpoint({
messages,
preprompt:
"You are a summarization AI. Summarize the user's request into a single short sentence of four words or less. Do not try to answer it, only summarize the user's query. Always start your answer with an emoji relevant to the summary",
generateSettings: {
max_new_tokens: 15,
},
})
.then((summary) => {
// add an emoji if none is found in the first three characters
if (!/\p{Emoji}/u.test(summary.slice(0, 3))) {
return "💬 " + summary;
}
return summary;
})
.catch((e) => {
logger.error(e);
return null;
});
}
| chat-ui/src/lib/server/summarize.ts/0 | {
"file_path": "chat-ui/src/lib/server/summarize.ts",
"repo_id": "chat-ui",
"token_count": 682
} | 102 |
import { WebSearchProvider, type WebSearchSource } from "$lib/types/WebSearch";
import { env } from "$env/dynamic/private";
import searchSerper from "./endpoints/serper";
import searchSerpApi from "./endpoints/serpApi";
import searchSerpStack from "./endpoints/serpStack";
import searchYouApi from "./endpoints/youApi";
import searchWebLocal from "./endpoints/webLocal";
import searchSearxng from "./endpoints/searxng";
export function getWebSearchProvider() {
if (env.YDC_API_KEY) return WebSearchProvider.YOU;
if (env.SEARXNG_QUERY_URL) return WebSearchProvider.SEARXNG;
return WebSearchProvider.GOOGLE;
}
/** Searches the web using the first available provider, based on the env */
export async function searchWeb(query: string): Promise<WebSearchSource[]> {
if (env.USE_LOCAL_WEBSEARCH) return searchWebLocal(query);
if (env.SEARXNG_QUERY_URL) return searchSearxng(query);
if (env.SERPER_API_KEY) return searchSerper(query);
if (env.YDC_API_KEY) return searchYouApi(query);
if (env.SERPAPI_KEY) return searchSerpApi(query);
if (env.SERPSTACK_API_KEY) return searchSerpStack(query);
throw new Error(
"No configuration found for web search. Please set USE_LOCAL_WEBSEARCH, SEARXNG_QUERY_URL, SERPER_API_KEY, YDC_API_KEY, or SERPSTACK_API_KEY in your environment variables."
);
}
| chat-ui/src/lib/server/websearch/search/endpoints.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/search/endpoints.ts",
"repo_id": "chat-ui",
"token_count": 443
} | 103 |
import { writable } from "svelte/store";
export interface WebSearchParameters {
useSearch: boolean;
nItems: number;
}
export const webSearchParameters = writable<WebSearchParameters>({
useSearch: false,
nItems: 5,
});
| chat-ui/src/lib/stores/webSearchParameters.ts/0 | {
"file_path": "chat-ui/src/lib/stores/webSearchParameters.ts",
"repo_id": "chat-ui",
"token_count": 68
} | 104 |
import { defaultModel } from "$lib/server/models";
import type { Assistant } from "./Assistant";
import type { Timestamps } from "./Timestamps";
import type { User } from "./User";
export interface Settings extends Timestamps {
userId?: User["_id"];
sessionId?: string;
/**
* Note: Only conversations with this setting explicitly set to true should be shared.
*
* This setting is explicitly set to true when users accept the ethics modal.
* */
shareConversationsWithModelAuthors: boolean;
ethicsModalAcceptedAt: Date | null;
activeModel: string;
hideEmojiOnSidebar?: boolean;
// model name and system prompts
customPrompts?: Record<string, string>;
assistants?: Assistant["_id"][];
}
// TODO: move this to a constant file along with other constants
export const DEFAULT_SETTINGS = {
shareConversationsWithModelAuthors: true,
activeModel: defaultModel.id,
hideEmojiOnSidebar: false,
customPrompts: {},
assistants: [],
};
| chat-ui/src/lib/types/Settings.ts/0 | {
"file_path": "chat-ui/src/lib/types/Settings.ts",
"repo_id": "chat-ui",
"token_count": 289
} | 105 |
import { base } from "$app/paths";
import { env as envPublic } from "$env/dynamic/public";
export function getShareUrl(url: URL, shareId: string): string {
return `${
envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || url.origin}${base}`
}/r/${shareId}`;
}
| chat-ui/src/lib/utils/getShareUrl.ts/0 | {
"file_path": "chat-ui/src/lib/utils/getShareUrl.ts",
"repo_id": "chat-ui",
"token_count": 102
} | 106 |
import type { Message } from "$lib/types/Message";
import Handlebars from "handlebars";
Handlebars.registerHelper("ifUser", function (this: Pick<Message, "from" | "content">, options) {
if (this.from == "user") return options.fn(this);
});
Handlebars.registerHelper(
"ifAssistant",
function (this: Pick<Message, "from" | "content">, options) {
if (this.from == "assistant") return options.fn(this);
}
);
export function compileTemplate<T>(input: string, model: { preprompt: string }) {
const template = Handlebars.compile<T>(input, {
knownHelpers: { ifUser: true, ifAssistant: true },
knownHelpersOnly: true,
noEscape: true,
strict: true,
preventIndent: true,
});
return function render(inputs: T, options?: RuntimeOptions) {
return template({ ...model, ...inputs }, options);
};
}
| chat-ui/src/lib/utils/template.ts/0 | {
"file_path": "chat-ui/src/lib/utils/template.ts",
"repo_id": "chat-ui",
"token_count": 266
} | 107 |
<script lang="ts">
import { goto } from "$app/navigation";
import { base } from "$app/paths";
import { env as envPublic } from "$env/dynamic/public";
import ChatWindow from "$lib/components/chat/ChatWindow.svelte";
import { ERROR_MESSAGES, error } from "$lib/stores/errors";
import { pendingMessage } from "$lib/stores/pendingMessage";
import { useSettingsStore } from "$lib/stores/settings.js";
import { findCurrentModel } from "$lib/utils/models";
export let data;
let loading = false;
let files: File[] = [];
const settings = useSettingsStore();
async function createConversation(message: string) {
try {
loading = true;
// check if $settings.activeModel is a valid model
// else check if it's an assistant, and use that model
// else use the first model
const validModels = data.models.map((model) => model.id);
let model;
if (validModels.includes($settings.activeModel)) {
model = $settings.activeModel;
} else {
if (validModels.includes(data.assistant?.modelId)) {
model = data.assistant?.modelId;
} else {
model = data.models[0].id;
}
}
const res = await fetch(`${base}/conversation`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
model,
preprompt: $settings.customPrompts[$settings.activeModel],
assistantId: data.assistant?._id,
}),
});
if (!res.ok) {
const errorMessage = (await res.json()).message || ERROR_MESSAGES.default;
error.set(errorMessage);
console.error("Error while creating conversation: ", errorMessage);
return;
}
const { conversationId } = await res.json();
// Ugly hack to use a store as temp storage, feel free to improve ^^
pendingMessage.set({
content: message,
files,
});
// invalidateAll to update list of conversations
await goto(`${base}/conversation/${conversationId}`, { invalidateAll: true });
} catch (err) {
error.set((err as Error).message || ERROR_MESSAGES.default);
console.error(err);
} finally {
loading = false;
}
}
</script>
<svelte:head>
<title>{envPublic.PUBLIC_APP_NAME}</title>
</svelte:head>
<ChatWindow
on:message={(ev) => createConversation(ev.detail)}
{loading}
assistant={data.assistant}
currentModel={findCurrentModel([...data.models, ...data.oldModels], $settings.activeModel)}
models={data.models}
bind:files
/>
| chat-ui/src/routes/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/+page.svelte",
"repo_id": "chat-ui",
"token_count": 899
} | 108 |
import type { RequestHandler } from "./$types";
import { collections } from "$lib/server/database";
import { ObjectId } from "mongodb";
import { error, redirect } from "@sveltejs/kit";
import { base } from "$app/paths";
import { z } from "zod";
import type { Message } from "$lib/types/Message";
import { models, validateModel } from "$lib/server/models";
import { defaultEmbeddingModel } from "$lib/server/embeddingModels";
import { v4 } from "uuid";
import { authCondition } from "$lib/server/auth";
import { usageLimits } from "$lib/server/usageLimits";
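// POST /conversation: creates a new conversation, optionally seeded from a shared conversation or an assistant.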
export const POST: RequestHandler = async ({ locals, request }) => {
const body = await request.text();
let title = "";
const parsedBody = z
.object({
fromShare: z.string().optional(),
model: validateModel(models),
assistantId: z.string().optional(),
preprompt: z.string().optional(),
})
.safeParse(JSON.parse(body));
if (!parsedBody.success) {
throw error(400, "Invalid request");
}
const values = parsedBody.data;
const convCount = await collections.conversations.countDocuments(authCondition(locals));
if (usageLimits?.conversations && convCount > usageLimits?.conversations) {
throw error(
429,
"You have reached the maximum number of conversations. Delete some to continue."
);
}
const model = models.find((m) => (m.id || m.name) === values.model);
if (!model) {
throw error(400, "Invalid model");
}
let messages: Message[] = [
{
id: v4(),
from: "system",
content: values.preprompt ?? "",
createdAt: new Date(),
updatedAt: new Date(),
children: [],
ancestors: [],
},
];
let rootMessageId: Message["id"] = messages[0].id;
let embeddingModel: string;
if (values.fromShare) {
const conversation = await collections.sharedConversations.findOne({
_id: values.fromShare,
});
if (!conversation) {
throw error(404, "Conversation not found");
}
title = conversation.title;
messages = conversation.messages;
rootMessageId = conversation.rootMessageId ?? rootMessageId;
values.model = conversation.model;
values.preprompt = conversation.preprompt;
values.assistantId = conversation.assistantId?.toString();
embeddingModel = conversation.embeddingModel;
}
embeddingModel ??= model.embeddingModel ?? defaultEmbeddingModel.name;
if (model.unlisted) {
throw error(400, "Can't start a conversation with an unlisted model");
}
// get preprompt from assistant if it exists
const assistant = await collections.assistants.findOne({
_id: new ObjectId(values.assistantId),
});
if (assistant) {
values.preprompt = assistant.preprompt;
} else {
values.preprompt ??= model?.preprompt ?? "";
}
if (messages && messages.length > 0 && messages[0].from === "system") {
messages[0].content = values.preprompt;
}
const res = await collections.conversations.insertOne({
_id: new ObjectId(),
title: title || "New Chat",
rootMessageId,
messages,
model: values.model,
preprompt: values.preprompt,
assistantId: values.assistantId ? new ObjectId(values.assistantId) : undefined,
createdAt: new Date(),
updatedAt: new Date(),
userAgent: request.headers.get("User-Agent") ?? undefined,
embeddingModel,
...(locals.user ? { userId: locals.user._id } : { sessionId: locals.sessionId }),
...(values.fromShare ? { meta: { fromShareId: values.fromShare } } : {}),
});
return new Response(
JSON.stringify({
conversationId: res.insertedId.toString(),
}),
{ headers: { "Content-Type": "application/json" } }
);
};
export const GET: RequestHandler = async () => {
throw redirect(302, `${base}/`);
};
| chat-ui/src/routes/conversation/+server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/+server.ts",
"repo_id": "chat-ui",
"token_count": 1227
} | 109 |
<script lang="ts">
import type { PageData } from "./$types";
import { env as envPublic } from "$env/dynamic/public";
import { isHuggingChat } from "$lib/utils/isHuggingChat";
import { base } from "$app/paths";
import { page } from "$app/stores";
import CarbonHelpFilled from "~icons/carbon/help-filled";
export let data: PageData;
</script>
<svelte:head>
{#if isHuggingChat}
<title>HuggingChat - Models</title>
<meta property="og:title" content="HuggingChat - Models" />
<meta property="og:type" content="link" />
<meta property="og:description" content="Browse HuggingChat available models" />
<meta property="og:url" content={$page.url.href} />
{/if}
</svelte:head>
<div class="scrollbar-custom mr-1 h-full overflow-y-auto py-12 max-sm:pt-8 md:py-24">
<div class="pt-42 mx-auto flex flex-col px-5 xl:w-[60rem] 2xl:w-[64rem]">
<div class="flex items-center">
<h1 class="text-2xl font-bold">Models</h1>
{#if isHuggingChat}
<a
href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/372"
class="ml-auto dark:text-gray-400 dark:hover:text-gray-300"
target="_blank"
>
<CarbonHelpFilled />
</a>
{/if}
</div>
<h3 class="text-gray-500">All models available on {envPublic.PUBLIC_APP_NAME}</h3>
<dl class="mt-8 grid grid-cols-1 gap-3 sm:gap-5 xl:grid-cols-2">
{#each data.models.filter((el) => !el.unlisted) as model, index (model.id)}
<a
href="{base}/models/{model.id}"
class="relative flex flex-col gap-2 overflow-hidden rounded-xl border bg-gray-50/50 px-6 py-5 shadow hover:bg-gray-50 hover:shadow-inner dark:border-gray-800/70 dark:bg-gray-950/20 dark:hover:bg-gray-950/40"
>
<div class="flex items-center justify-between">
{#if model.logoUrl}
<img
class=" overflown aspect-square size-6 rounded border dark:border-gray-700"
src={model.logoUrl}
alt=""
/>
{:else}
<div class="size-6 rounded border border-transparent bg-gray-300 dark:bg-gray-800" />
{/if}
{#if index === 0}
<div
class="rounded-full border border-gray-300 px-2 py-0.5 text-xs text-gray-500 dark:border-gray-500 dark:text-gray-400"
>
Default
</div>
{/if}
</div>
<dt class="flex items-center gap-2 font-semibold">
{model.displayName}
</dt>
<dd class="whitespace-pre-wrap text-sm text-gray-500 dark:text-gray-400">
{model.description || "-"}
</dd>
</a>
{/each}
</dl>
</div>
</div>
| chat-ui/src/routes/models/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/models/+page.svelte",
"repo_id": "chat-ui",
"token_count": 1126
} | 110 |
import { collections } from "$lib/server/database";
import { error, type RequestHandler } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
export const GET: RequestHandler = async ({ params }) => {
const assistant = await collections.assistants.findOne({
_id: new ObjectId(params.assistantId),
});
if (!assistant) {
throw error(404, "No assistant found");
}
if (!assistant.avatar) {
throw error(404, "No avatar found");
}
const fileId = collections.bucket.find({ filename: assistant._id.toString() });
const content = await fileId.next().then(async (file) => {
if (!file?._id) {
throw error(404, "Avatar not found");
}
const fileStream = collections.bucket.openDownloadStream(file?._id);
const fileBuffer = await new Promise<Buffer>((resolve, reject) => {
const chunks: Uint8Array[] = [];
fileStream.on("data", (chunk) => chunks.push(chunk));
fileStream.on("error", reject);
fileStream.on("end", () => resolve(Buffer.concat(chunks)));
});
return fileBuffer;
});
return new Response(content, {
headers: {
"Content-Type": "image/jpeg",
},
});
};
| chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/avatar.jpg/+server.ts/0 | {
"file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/avatar.jpg/+server.ts",
"repo_id": "chat-ui",
"token_count": 385
} | 111 |
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none">
<path
fill="#2063EC"
d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z"
/>
</svg>
| chat-ui/static/chatui/logo.svg/0 | {
"file_path": "chat-ui/static/chatui/logo.svg",
"repo_id": "chat-ui",
"token_count": 125
} | 112 |
# Introduction
Welcome to the Hugging Face course! This chapter will guide you through setting up a working environment. If you're just starting the course, we recommend that you first take a look at [Chapter 1](/course/chapter1), then come back and set up your environment so you can try the code yourself.
All the libraries we'll be using in this course are available as Python packages, so here we'll show you how to set up a Python environment and install the specific libraries you'll need.
We'll cover two ways of setting up your working environment, using a Colab notebook or a Python virtual environment. Pick whichever is easier for you; for those just getting started, we strongly recommend beginning with a Colab notebook.
Note that we will not be covering Windows here. If you're running Windows, we recommend following along using a Colab notebook. If you're using a Linux distribution or macOS, you can use either approach described here.
Much of the course relies on you having a Hugging Face account, so we recommend creating one: [create an account](https://huggingface.co/join).
## Using a Google Colab notebook
Using a Colab notebook is the simplest possible setup: open a notebook in your browser and get straight to coding!
If you're not familiar with Colab, we recommend you start by following the [introduction to Colab](https://colab.research.google.com/notebooks/intro.ipynb). Colab lets you use accelerator hardware, like GPUs or TPUs, which is free for small workloads.
Once you're comfortable with Colab, create a new notebook and begin the setup:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/new_colab.png" alt="An empty colab notebook" width="80%"/>
</div>
The next step is to install the libraries we'll be using in this course. We'll use `pip`, Python's package manager, for the installation. In notebooks, you can run system commands by prefixing them with the `!` character. For example, you can install the 🤗 Transformers library with the following command:
```
!pip install transformers
```
You can make sure the package was installed correctly within your Python runtime by importing it:
```
import transformers
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/install.gif" alt="A gif showing the result of the two commands above: installation and import" width="80%"/>
</div>
This installs a very light version of 🤗 Transformers. In particular, no specific machine learning frameworks (like PyTorch or TensorFlow) are installed. Since we'll be using many different features of the library, we recommend installing the development version, which comes with all the dependencies required for pretty much any imaginable use case:
```
!pip install transformers[sentencepiece]
```
This will take a bit of time to install, but afterwards you'll be ready for the rest of the course!
## Using a Python virtual environment
If you prefer to use a Python virtual environment, the first step is to install Python on your system. We recommend following [this guide](https://realpython.com/installing-python/) to get started.
Once you have Python installed, you should be able to run Python commands in your terminal. Before moving on to the next step, you can make sure it is installed correctly by running the command `python --version`, which should print the Python version installed on your system.
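For example, a quick check from the terminal might look like the following (the version number shown here is only an illustration; yours will depend on what you installed):
```
python --version
```
```out
Python 3.10.12
```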
When you run a Python command in your terminal, such as `python --version`, you should think of the program running your command as the "main" Python on your system. We recommend keeping this main installation free of any packages, and instead using it to create a separate virtual environment for each application you work on. This way, each application has its own dependencies and packages, and you won't need to worry about potential compatibility issues with other applications.
In Python this is done with [*virtual environments*](https://docs.python.org/3/tutorial/venv.html), which are self-contained directory trees that each contain a Python installation with a particular Python version, alongside all the packages the application needs. Such a virtual environment can be created in several different ways, but we'll use the official Python package for this purpose, called [`venv`](https://docs.python.org/3/library/venv.html#module-venv).
First, create the directory you'd like your application to live in. For example, you might want to make a new directory called *transformers-course* inside your home directory:
```
mkdir ~/transformers-course
cd ~/transformers-course
```
From inside this directory, create a virtual environment using the Python `venv` module:
```
python -m venv .env
```
You should now have a folder called *.env* inside your otherwise empty directory:
```
ls -a
```
```out
. .. .env
```
You can now activate or deactivate the virtual environment using the following commands:
```
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
source .env/bin/deactivate
```
You can make sure the virtual environment is activated by running the `which python` command:
if it points to the virtual environment, then you have successfully activated it!
```
which python
```
```out
/home/<user>/transformers-course/.env/bin/python
```
### Installing dependencies
Just like installing packages in Google Colab in the previous section, here too you can install the development version of 🤗 Transformers using the `pip` package manager:
```
pip install "transformers[sentencepiece]"
```
You're now all set up and ready to go!
| course/chapters/bn/chapter0/1.mdx/0 | {
"file_path": "course/chapters/bn/chapter0/1.mdx",
"repo_id": "course",
"token_count": 8946
} | 113 |
<FrameworkSwitchCourse {fw} />
# Introduction
<CourseFloatingBanner
chapter={3}
classNames="absolute z-10 right-0 top-0"
/>
In [Chapter 2](/course/chapter2) we covered how to use tokenizers and pretrained models to make predictions. But what if you want to fine-tune a pretrained model on your own dataset? That's the topic of this chapter! Here is what you will learn:
{#if fw === 'pt'}
* Wie bereitet man einen großen Datensatz aus dem Hub vor?
* Wie nutzt man die übergeordnete `Trainer`-API, um Modelle zu fein-tunen?
* Wie implementiert man eine benutzerdefinierte Trainingsschleife?
* Wie nutzt man die 🤗 Accelerate-Bibliothek für benutzerdefinierte Trainingsschleifen auf verteilten Systemen?
{:else}
* Wie bereitet man einen großen Datensatz aus dem Hub vor?
* Wie nutzt man Keras um Modelle zu fein-tunen?
* Wie setzt man Keras für Vorhersagen ein?
* Wie implementiert man benutzerdefinierte Metriken?
{/if}
Um deine trainierten Checkpoints auf den Hugging Face Hub hochzuladen, benötigst du ein huggingface.co-Konto: [Erstelle ein Konto](https://huggingface.co/join) | course/chapters/de/chapter3/1.mdx/0 | {
"file_path": "course/chapters/de/chapter3/1.mdx",
"repo_id": "course",
"token_count": 443
} | 114 |
# Introduction[[introduction]]
<CourseFloatingBanner
chapter={1}
classNames="absolute z-10 right-0 top-0"
/>
## Welcome to the 🤗 Course![[welcome-to-the-course]]
<Youtube id="00GKzGyWFEs" />
This course will teach you about natural language processing (NLP) using libraries from the [Hugging Face](https://huggingface.co/) ecosystem — [🤗 Transformers](https://github.com/huggingface/transformers), [🤗 Datasets](https://github.com/huggingface/datasets), [🤗 Tokenizers](https://github.com/huggingface/tokenizers), and [🤗 Accelerate](https://github.com/huggingface/accelerate) — as well as the [Hugging Face Hub](https://huggingface.co/models). It's completely free and without ads.
## What to expect?[[what-to-expect]]
Here is a brief overview of the course:
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Brief overview of the chapters of the course.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Brief overview of the chapters of the course.">
</div>
- Chapters 1 to 4 provide an introduction to the main concepts of the 🤗 Transformers library. By the end of this part of the course, you will be familiar with how Transformer models work and will know how to use a model from the [Hugging Face Hub](https://huggingface.co/models), fine-tune it on a dataset, and share your results on the Hub!
- Chapters 5 to 8 teach the basics of 🤗 Datasets and 🤗 Tokenizers before diving into classic NLP tasks. By the end of this part, you will be able to tackle the most common NLP problems by yourself.
- Chapters 9 to 12 go beyond NLP, and explore how Transformer models can be used to tackle tasks in speech processing and computer vision. Along the way, you'll learn how to build and share demos of your models, and optimize them for production environments. By the end of this part, you will be ready to apply 🤗 Transformers to (almost) any machine learning problem!
This course:
* Requires a good knowledge of Python
* Is better taken after an introductory deep learning course, such as [fast.ai's](https://www.fast.ai/) [Practical Deep Learning for Coders](https://course.fast.ai/) or one of the programs developed by [DeepLearning.AI](https://www.deeplearning.ai/)
* Does not expect prior [PyTorch](https://pytorch.org/) or [TensorFlow](https://www.tensorflow.org/) knowledge, though some familiarity with either of those will help
After you've completed this course, we recommend checking out DeepLearning.AI's [Natural Language Processing Specialization](https://www.coursera.org/specializations/natural-language-processing?utm_source=deeplearning-ai&utm_medium=institutions&utm_campaign=20211011-nlp-2-hugging_face-page-nlp-refresh), which covers a wide range of traditional NLP models like naive Bayes and LSTMs that are well worth knowing about!
## Who are we?[[who-are-we]]
About the authors:
[**Abubakar Abid**](https://huggingface.co/abidlabs) completed his PhD at Stanford in applied machine learning. During his PhD, he founded [Gradio](https://github.com/gradio-app/gradio), an open-source Python library that has been used to build over 600,000 machine learning demos. Gradio was acquired by Hugging Face, which is where Abubakar now serves as a machine learning team lead.
[**Matthew Carrigan**](https://huggingface.co/Rocketknight1) is a Machine Learning Engineer at Hugging Face. He lives in Dublin, Ireland and previously worked as an ML engineer at Parse.ly and before that as a post-doctoral researcher at Trinity College Dublin. He does not believe we're going to get to AGI by scaling existing architectures, but has high hopes for robot immortality regardless.
[**Lysandre Debut**](https://huggingface.co/lysandre) is a Machine Learning Engineer at Hugging Face and has been working on the 🤗 Transformers library since the very early development stages. His aim is to make NLP accessible for everyone by developing tools with a very simple API.
[**Sylvain Gugger**](https://huggingface.co/sgugger) is a Research Engineer at Hugging Face and one of the core maintainers of the 🤗 Transformers library. Previously he was a Research Scientist at fast.ai, and he co-wrote _[Deep Learning for Coders with fastai and PyTorch](https://learning.oreilly.com/library/view/deep-learning-for/9781492045519/)_ with Jeremy Howard. The main focus of his research is on making deep learning more accessible, by designing and improving techniques that allow models to train fast on limited resources.
[**Dawood Khan**](https://huggingface.co/dawoodkhan82) is a Machine Learning Engineer at Hugging Face. He's from NYC and graduated from New York University studying Computer Science. After working as an iOS Engineer for a few years, Dawood quit to start Gradio with his fellow co-founders. Gradio was eventually acquired by Hugging Face.
[**Merve Noyan**](https://huggingface.co/merve) is a developer advocate at Hugging Face, working on developing tools and building content around them to democratize machine learning for everyone.
[**Lucile Saulnier**](https://huggingface.co/SaulLu) is a machine learning engineer at Hugging Face, developing and supporting the use of open source tools. She is also actively involved in many research projects in the field of Natural Language Processing such as collaborative training and BigScience.
[**Lewis Tunstall**](https://huggingface.co/lewtun) is a machine learning engineer at Hugging Face, focused on developing open-source tools and making them accessible to the wider community. He is also a co-author of the O’Reilly book [Natural Language Processing with Transformers](https://www.oreilly.com/library/view/natural-language-processing/9781098136789/).
[**Leandro von Werra**](https://huggingface.co/lvwerra) is a machine learning engineer in the open-source team at Hugging Face and also a co-author of the O’Reilly book [Natural Language Processing with Transformers](https://www.oreilly.com/library/view/natural-language-processing/9781098136789/). He has several years of industry experience bringing NLP projects to production by working across the whole machine learning stack.
## FAQ[[faq]]
Here are some answers to frequently asked questions:
- **Does taking this course lead to a certification?**
Currently we do not have any certification for this course. However, we are working on a certification program for the Hugging Face ecosystem -- stay tuned!
- **How much time should I spend on this course?**
Each chapter in this course is designed to be completed in 1 week, with approximately 6-8 hours of work per week. However, you can take as much time as you need to complete the course.
- **Where can I ask a question if I have one?**
If you have a question about any section of the course, just click on the "*Ask a question*" banner at the top of the page to be automatically redirected to the right section of the [Hugging Face forums](https://discuss.huggingface.co/):
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/forum-button.png" alt="Link to the Hugging Face forums" width="75%">
Note that a list of [project ideas](https://discuss.huggingface.co/c/course/course-event/25) is also available on the forums if you wish to practice more once you have completed the course.
- **Where can I get the code for the course?**
For each section, click on the banner at the top of the page to run the code in either Google Colab or Amazon SageMaker Studio Lab:
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/notebook-buttons.png" alt="Link to the Hugging Face course notebooks" width="75%">
The Jupyter notebooks containing all the code from the course are hosted on the [`huggingface/notebooks`](https://github.com/huggingface/notebooks) repo. If you wish to generate them locally, check out the instructions in the [`course`](https://github.com/huggingface/course#-jupyter-notebooks) repo on GitHub.
- **How can I contribute to the course?**
There are many ways to contribute to the course! If you find a typo or a bug, please open an issue on the [`course`](https://github.com/huggingface/course) repo. If you would like to help translate the course into your native language, check out the instructions [here](https://github.com/huggingface/course#translating-the-course-into-your-language).
- **What were the choices made for each translation?**
Each translation has a glossary and `TRANSLATING.txt` file that details the choices that were made for machine learning jargon etc. You can find an example for German [here](https://github.com/huggingface/course/blob/main/chapters/de/TRANSLATING.txt).
- **Can I reuse this course?**
Of course! The course is released under the permissive [Apache 2 license](https://www.apache.org/licenses/LICENSE-2.0.html). This means that you must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. If you would like to cite the course, please use the following BibTeX:
```
@misc{huggingfacecourse,
author = {Hugging Face},
title = {The Hugging Face Course, 2022},
howpublished = "\url{https://huggingface.co/course}",
year = {2022},
note = "[Online; accessed <today>]"
}
```
## Let's Go
Are you ready to roll? In this chapter, you will learn:
* How to use the `pipeline()` function to solve NLP tasks such as text generation and classification
* About the Transformer architecture
* How to distinguish between encoder, decoder, and encoder-decoder architectures and use cases
| course/chapters/en/chapter1/1.mdx/0 | {
"file_path": "course/chapters/en/chapter1/1.mdx",
"repo_id": "course",
"token_count": 2670
} | 115 |
# Basic usage completed![[basic-usage-completed]]
<CourseFloatingBanner
chapter={2}
classNames="absolute z-10 right-0 top-0"
/>
Great job following the course up to here! To recap, in this chapter you:
- Learned the basic building blocks of a Transformer model.
- Learned what makes up a tokenization pipeline.
- Saw how to use a Transformer model in practice.
- Learned how to leverage a tokenizer to convert text to tensors that are understandable by the model.
- Set up a tokenizer and a model together to get from text to predictions.
- Learned the limitations of input IDs, and learned about attention masks.
- Played around with versatile and configurable tokenizer methods.
From now on, you should be able to freely navigate the 🤗 Transformers docs: the vocabulary will sound familiar, and you've already seen the methods that you'll use the majority of the time.
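As a quick refresher, here is a minimal sketch that strings those pieces together, using PyTorch for concreteness (the checkpoint is only an illustrative choice, not the one you must use):

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Any sequence classification checkpoint works here; this one is picked purely for illustration
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

# Tokenize a small batch, padding so both sequences fit in one rectangular tensor
inputs = tokenizer(
    ["I've been waiting for a HuggingFace course my whole life.", "I hate this so much!"],
    padding=True,
    truncation=True,
    return_tensors="pt",
)

# Run the model and turn the raw logits into probabilities
outputs = model(**inputs)
predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
print(predictions)
```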
| course/chapters/en/chapter2/7.mdx/0 | {
"file_path": "course/chapters/en/chapter2/7.mdx",
"repo_id": "course",
"token_count": 218
} | 116 |
# What if my dataset isn't on the Hub?[[what-if-my-dataset-isnt-on-the-hub]]
<CourseFloatingBanner chapter={5}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter5/section2.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter5/section2.ipynb"},
]} />
You know how to use the [Hugging Face Hub](https://huggingface.co/datasets) to download datasets, but you'll often find yourself working with data that is stored either on your laptop or on a remote server. In this section we'll show you how 🤗 Datasets can be used to load datasets that aren't available on the Hugging Face Hub.
<Youtube id="HyQgpJTkRdE"/>
## Working with local and remote datasets[[working-with-local-and-remote-datasets]]
🤗 Datasets provides loading scripts to handle the loading of local and remote datasets. It supports several common data formats, such as:
| Data format | Loading script | Example |
| :----------------: | :------------: | :-----------------------------------------------------: |
| CSV & TSV | `csv` | `load_dataset("csv", data_files="my_file.csv")` |
| Text files | `text` | `load_dataset("text", data_files="my_file.txt")` |
| JSON & JSON Lines | `json` | `load_dataset("json", data_files="my_file.jsonl")` |
| Pickled DataFrames | `pandas` | `load_dataset("pandas", data_files="my_dataframe.pkl")` |
As shown in the table, for each data format we just need to specify the type of loading script in the `load_dataset()` function, along with a `data_files` argument that specifies the path to one or more files. Let's start by loading a dataset from local files; later we'll see how to do the same with remote files.
## Loading a local dataset[[loading-a-local-dataset]]
For this example we'll use the [SQuAD-it dataset](https://github.com/crux82/squad-it/), which is a large-scale dataset for question answering in Italian.
The training and test splits are hosted on GitHub, so we can download them with a simple `wget` command:
```python
!wget https://github.com/crux82/squad-it/raw/master/SQuAD_it-train.json.gz
!wget https://github.com/crux82/squad-it/raw/master/SQuAD_it-test.json.gz
```
This will download two compressed files called *SQuAD_it-train.json.gz* and *SQuAD_it-test.json.gz*, which we can decompress with the Linux `gzip` command:
```python
!gzip -dkv SQuAD_it-*.json.gz
```
```bash
SQuAD_it-test.json.gz: 87.4% -- replaced with SQuAD_it-test.json
SQuAD_it-train.json.gz: 82.2% -- replaced with SQuAD_it-train.json
```
We can see that the compressed files have been replaced with _SQuAD_it-train.json_ and _SQuAD_it-test.json_, and that the data is stored in the JSON format.
<Tip>
✎ If you're wondering why there's a `!` character in the above shell commands, that's because we're running them within a Jupyter notebook. Simply remove the prefix if you want to download and unzip the dataset within a terminal.
</Tip>
To load a JSON file with the `load_dataset()` function, we just need to know if we're dealing with ordinary JSON (similar to a nested dictionary) or JSON Lines (line-separated JSON). Like many question answering datasets, SQuAD-it uses the nested format, with all the text stored in a `data` field. This means we can load the dataset by specifying the `field` argument as follows:
```py
from datasets import load_dataset
squad_it_dataset = load_dataset("json", data_files="SQuAD_it-train.json", field="data")
```
By default, loading local files creates a `DatasetDict` object with a `train` split. We can see this by inspecting the `squad_it_dataset` object:
```py
squad_it_dataset
```
```python out
DatasetDict({
train: Dataset({
features: ['title', 'paragraphs'],
num_rows: 442
})
})
```
This shows us the number of rows and the column names associated with the training set. We can view one of the examples by indexing into the `train` split as follows:
```py
squad_it_dataset["train"][0]
```
```python out
{
"title": "Terremoto del Sichuan del 2008",
"paragraphs": [
{
"context": "Il terremoto del Sichuan del 2008 o il terremoto...",
"qas": [
{
"answers": [{"answer_start": 29, "text": "2008"}],
"id": "56cdca7862d2951400fa6826",
"question": "In quale anno si è verificato il terremoto nel Sichuan?",
},
...
],
},
...
],
}
```
Great, we've loaded our first local dataset! But while this worked for the training set, what we really want is to include both the `train` and `test` splits in a single `DatasetDict` object so we can apply `Dataset.map()` functions across both splits at once. To do this, we can provide a dictionary to the `data_files` argument that maps each split name to a file associated with that split:
```py
data_files = {"train": "SQuAD_it-train.json", "test": "SQuAD_it-test.json"}
squad_it_dataset = load_dataset("json", data_files=data_files, field="data")
squad_it_dataset
```
```python out
DatasetDict({
train: Dataset({
features: ['title', 'paragraphs'],
num_rows: 442
})
test: Dataset({
features: ['title', 'paragraphs'],
num_rows: 48
})
})
```
This is exactly what we wanted. Now we can apply various preprocessing techniques to clean up the data, tokenize the text, and so on.
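For instance, here is a minimal sketch of a cleanup step applied to both splits at once (the transformation itself is purely illustrative):

```py
# Illustrative only: lowercase every title in both the train and test splits with a single map() call
squad_it_dataset = squad_it_dataset.map(lambda example: {"title": example["title"].lower()})
```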
<Tip>
The `data_files` argument of the `load_dataset()` function is quite flexible and can be either a single file path, a list of file paths, or a dictionary that maps split names to file paths. You can also glob files that match a specified pattern according to the rules used by the Unix shell (e.g., you can glob all the JSON files in a directory as a single split by setting `data_files="*.json"`). See the 🤗 Datasets [documentation](https://huggingface.co/docs/datasets/loading#local-and-remote-files) for more details.
</Tip>
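As an illustration of that glob option, here is a hedged sketch (the directory layout is hypothetical):

```py
from datasets import load_dataset

# Hypothetical layout: data/SQuAD_it-train.json and data/SQuAD_it-test.json sitting next to other JSON files
data_files = {"train": "data/*train*.json", "test": "data/*test*.json"}
squad_it_dataset = load_dataset("json", data_files=data_files, field="data")
```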
The loading scripts in 🤗 Datasets actually support automatic decompression of the input files, so we could have skipped the use of `gzip` by pointing the `data_files` argument directly to the compressed files:
```py
data_files = {"train": "SQuAD_it-train.json.gz", "test": "SQuAD_it-test.json.gz"}
squad_it_dataset = load_dataset("json", data_files=data_files, field="data")
```
This can be useful if you don't want to manually decompress many GZIP files. The automatic decompression also applies to other common formats like ZIP and TAR, so you just need to point `data_files` to the compressed files and you're good to go!
Now that you know how to load local files on your laptop or desktop, let's take a look at loading remote files.
## Loading a remote dataset[[loading-a-remote-dataset]]
If you're working as a data scientist or coder in a company, there's a good chance the datasets you want to analyze are stored on some remote server. Fortunately, loading remote files is just as simple as loading local ones! Instead of providing a path to local files, we point the `data_files` argument of `load_dataset()` to one or more URLs where the remote files are stored. For example, for the SQuAD-it dataset hosted on GitHub, we can just point `data_files` to the _SQuAD_it-*.json.gz_ URLs as follows:
```py
url = "https://github.com/crux82/squad-it/raw/master/"
data_files = {
"train": url + "SQuAD_it-train.json.gz",
"test": url + "SQuAD_it-test.json.gz",
}
squad_it_dataset = load_dataset("json", data_files=data_files, field="data")
```
This returns the same `DatasetDict` object obtained above, but saves us the step of manually downloading and decompressing the _SQuAD_it-*.json.gz_ files. This wraps up our foray into the various ways to load datasets that aren't hosted on the Hugging Face Hub. Now that we've got a dataset to play with, let's get our hands dirty with various data-wrangling techniques!
<Tip>
✏️ **Try it out!** Pick another dataset hosted on GitHub or the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) and try loading it both locally and remotely using the techniques introduced above. For bonus points, try loading a dataset that’s stored in a CSV or text format (see the [documentation](https://huggingface.co/docs/datasets/loading#local-and-remote-files) for more information on these formats).
</Tip>
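If you need a nudge for the CSV case, here is a hedged starting point (the URL and separator are placeholders to adapt to the dataset you pick):

```py
from datasets import load_dataset

# Placeholder URL -- replace it with the raw CSV file of the dataset you chose
url = "https://example.com/path/to/your_dataset.csv"
csv_dataset = load_dataset("csv", data_files={"train": url}, sep=",")
```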
| course/chapters/en/chapter5/2.mdx/0 | {
"file_path": "course/chapters/en/chapter5/2.mdx",
"repo_id": "course",
"token_count": 2926
} | 117 |
# Building a tokenizer, block by block[[building-a-tokenizer-block-by-block]]
<CourseFloatingBanner chapter={6}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter6/section8.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter6/section8.ipynb"},
]} />
As we've seen in the previous sections, tokenization comprises several steps:
- Normalization (any cleanup of the text that is deemed necessary, such as removing spaces or accents, Unicode normalization, etc.)
- Pre-tokenization (splitting the input into words)
- Running the input through the model (using the pre-tokenized words to produce a sequence of tokens)
- Post-processing (adding the special tokens of the tokenizer, generating the attention mask and token type IDs)
As a reminder, here's another look at the overall process:
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter6/tokenization_pipeline.svg" alt="The tokenization pipeline.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter6/tokenization_pipeline-dark.svg" alt="The tokenization pipeline.">
</div>
The 🤗 Tokenizers library has been built to provide several options for each of those steps, which you can mix and match together. In this section we'll see how we can build a tokenizer from scratch, as opposed to training a new tokenizer from an old one as we did in [section 2](/course/chapter6/2). You'll then be able to build any kind of tokenizer you can think of!
<Youtube id="MR8tZm5ViWU"/>
More precisely, the library is built around a central `Tokenizer` class with the building blocks regrouped in submodules:
- `normalizers` contains all the possible types of `Normalizer` you can use (complete list [here](https://huggingface.co/docs/tokenizers/api/normalizers)).
- `pre_tokenizers` contains all the possible types of `PreTokenizer` you can use (complete list [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers)).
- `models` contains the various types of `Model` you can use, like `BPE`, `WordPiece`, and `Unigram` (complete list [here](https://huggingface.co/docs/tokenizers/api/models)).
- `trainers` contains all the different types of `Trainer` you can use to train your model on a corpus (one per type of model; complete list [here](https://huggingface.co/docs/tokenizers/api/trainers)).
- `post_processors` contains the various types of `PostProcessor` you can use (complete list [here](https://huggingface.co/docs/tokenizers/api/post-processors)).
- `decoders` contains the various types of `Decoder` you can use to decode the outputs of tokenization (complete list [here](https://huggingface.co/docs/tokenizers/components#decoders)).
You can find the whole list of building blocks [here](https://huggingface.co/docs/tokenizers/components).
## Acquiring a corpus[[acquiring-a-corpus]]
To train our new tokenizer, we will use a small corpus of text (so the examples run fast). The steps for acquiring the corpus are similar to the ones we took at the [beginning of this chapter](/course/chapter6/2), but this time we'll use the [WikiText-2](https://huggingface.co/datasets/wikitext) dataset:
```python
from datasets import load_dataset
dataset = load_dataset("wikitext", name="wikitext-2-raw-v1", split="train")
def get_training_corpus():
for i in range(0, len(dataset), 1000):
yield dataset[i : i + 1000]["text"]
```
The function `get_training_corpus()` is a generator that will yield batches of 1,000 texts, which we will use to train the tokenizer.
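A quick, hedged sanity check of what the generator yields (purely illustrative):

```python
# Grab the first batch from the generator: it should be a list of 1,000 raw text strings
first_batch = next(get_training_corpus())
print(type(first_batch), len(first_batch))
```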
🤗 Tokenizers can also be trained on text files directly. Here's how we can generate a text file containing all the texts/inputs from WikiText-2 that we can use locally:
```python
with open("wikitext-2.txt", "w", encoding="utf-8") as f:
for i in range(len(dataset)):
f.write(dataset[i]["text"] + "\n")
```
Next we'll show you how to build your own BERT, GPT-2, and XLNet tokenizers, block by block. That will give us an example of each of the three main tokenization algorithms: WordPiece, BPE, and Unigram. Let's start with BERT!
## Building a WordPiece tokenizer from scratch[[building-a-wordpiece-tokenizer-from-scratch]]
To build a tokenizer with the 🤗 Tokenizers library, we start by instantiating a `Tokenizer` object with a `model`, then set its `normalizer`, `pre_tokenizer`, `post_processor`, and `decoder` attributes to the values we want.
For this example, we'll create a `Tokenizer` with a WordPiece model:
```python
from tokenizers import (
decoders,
models,
normalizers,
pre_tokenizers,
processors,
trainers,
Tokenizer,
)
tokenizer = Tokenizer(models.WordPiece(unk_token="[UNK]"))
```
We have to specify the `unk_token` so the model knows what to return when it encounters characters it hasn't seen before. Other arguments we can set here include the `vocab` of our model (we're going to train the model, so we don't need to set this) and `max_input_chars_per_word`, which specifies a maximum length for each word (words longer than the value passed will be split).
The first step of tokenization is normalization, so let's begin with that. Since BERT is widely used, there is a `BertNormalizer` with the classic options we can set for BERT: `lowercase` and `strip_accents`, which are self-explanatory; `clean_text` to remove all control characters and replace repeating spaces with a single one; and `handle_chinese_chars`, which places spaces around Chinese characters. To replicate the `bert-base-uncased` tokenizer, we can just set this normalizer:
```python
tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True)
```
Generally speaking, however, when building a new tokenizer you won't have access to such a handy normalizer already implemented in the 🤗 Tokenizers library -- so let's see how to create the BERT normalizer by hand. The library provides a `Lowercase` normalizer and a `StripAccents` normalizer, and you can compose several normalizers using a `Sequence`:
```python
tokenizer.normalizer = normalizers.Sequence(
[normalizers.NFD(), normalizers.Lowercase(), normalizers.StripAccents()]
)
```
We're also using an `NFD` Unicode normalizer, as otherwise the `StripAccents` normalizer won't properly recognize the accented characters and thus won't strip them out.
As we've seen before, we can use the `normalize_str()` method of the `normalizer` to check out the effects it has on a given text:
```python
print(tokenizer.normalizer.normalize_str("Héllò hôw are ü?"))
```
```python out
hello how are u?
```
<Tip>
**To go further** If you test the two versions of the previous normalizers on a string containing the unicode character `u"\u0085"` you will surely notice that these two normalizers are not exactly equivalent.
To avoid over-complicating the version with `normalizers.Sequence` too much, we haven't included the Regex replacements that the `BertNormalizer` requires when the `clean_text` argument is set to `True` -- which is the default behavior. But don't worry: it is possible to get exactly the same normalization without using the handy `BertNormalizer` by adding two `normalizers.Replace`'s to the normalizers sequence.
</Tip>
Next is the pre-tokenization step. Again, there is a prebuilt `BertPreTokenizer` that we can use:
```python
tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
```
Or we can build it from scratch:
```python
tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
```
Note that the `Whitespace` pre-tokenizer splits on whitespace and all characters that are not letters, digits, or the underscore character, so it technically splits on whitespace and punctuation:
```python
tokenizer.pre_tokenizer.pre_tokenize_str("Let's test my pre-tokenizer.")
```
```python out
[('Let', (0, 3)), ("'", (3, 4)), ('s', (4, 5)), ('test', (6, 10)), ('my', (11, 13)), ('pre', (14, 17)),
('-', (17, 18)), ('tokenizer', (18, 27)), ('.', (27, 28))]
```
If you only want to split on whitespace, you should use the `WhitespaceSplit` pre-tokenizer instead:
```python
pre_tokenizer = pre_tokenizers.WhitespaceSplit()
pre_tokenizer.pre_tokenize_str("Let's test my pre-tokenizer.")
```
```python out
[("Let's", (0, 5)), ('test', (6, 10)), ('my', (11, 13)), ('pre-tokenizer.', (14, 28))]
```
Like with normalizers, you can use a `Sequence` to compose several pre-tokenizers:
```python
pre_tokenizer = pre_tokenizers.Sequence(
[pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Punctuation()]
)
pre_tokenizer.pre_tokenize_str("Let's test my pre-tokenizer.")
```
```python out
[('Let', (0, 3)), ("'", (3, 4)), ('s', (4, 5)), ('test', (6, 10)), ('my', (11, 13)), ('pre', (14, 17)),
('-', (17, 18)), ('tokenizer', (18, 27)), ('.', (27, 28))]
```
The next step in the tokenization pipeline is running the inputs through the model. We already specified our model in the initialization, but we still need to train it, which will require a `WordPieceTrainer`. The main thing to remember when instantiating a trainer in 🤗 Tokenizers is that you need to pass it all the special tokens you intend to use -- otherwise it won't add them to the vocabulary, since they are not in the training corpus:
```python
special_tokens = ["[UNK]", "[PAD]", "[CLS]", "[SEP]", "[MASK]"]
trainer = trainers.WordPieceTrainer(vocab_size=25000, special_tokens=special_tokens)
```
As well as specifying the `vocab_size` and `special_tokens`, we can set the `min_frequency` (the number of times a token must appear to be included in the vocabulary) or change the `continuing_subword_prefix` (if we want to use something different from `##`).
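For instance, a hedged variant of the trainer with those extra options spelled out (the values are only illustrative, and `##` is already the default prefix):

```python
trainer = trainers.WordPieceTrainer(
    vocab_size=25000,
    special_tokens=special_tokens,
    min_frequency=2,  # a token must appear at least twice to make it into the vocabulary
    continuing_subword_prefix="##",  # shown explicitly here, but this is the default anyway
)
```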
To train our model using the iterator we defined earlier, we just have to execute this command:
```python
tokenizer.train_from_iterator(get_training_corpus(), trainer=trainer)
```
We can also use text files to train our tokenizer, which would look like this (we reinitialize the model with an empty `WordPiece` beforehand):
```python
tokenizer.model = models.WordPiece(unk_token="[UNK]")
tokenizer.train(["wikitext-2.txt"], trainer=trainer)
```
In both cases, we can then test the tokenizer on a text by calling the `encode()` method:
```python
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
```
```python out
['let', "'", 's', 'test', 'this', 'tok', '##eni', '##zer', '.']
```
The `encoding` obtained is an `Encoding`, which contains all the necessary outputs of the tokenizer in its various attributes: `ids`, `type_ids`, `tokens`, `offsets`, `attention_mask`, `special_tokens_mask`, and `overflowing`.
The last step in the tokenization pipeline is post-processing. We need to add the `[CLS]` token at the beginning and the `[SEP]` token at the end (or after each sentence, if we have a pair of sentences). We will use a `TemplateProcessor` for this, but first we need to know the IDs of the `[CLS]` and `[SEP]` tokens in the vocabulary:
```python
cls_token_id = tokenizer.token_to_id("[CLS]")
sep_token_id = tokenizer.token_to_id("[SEP]")
print(cls_token_id, sep_token_id)
```
```python out
(2, 3)
```
To write the template for the `TemplateProcessor`, we have to specify how to treat a single sentence and a pair of sentences. For both, we write the special tokens we want to use; the first (or single) sentence is represented by `$A`, while the second sentence (if encoding a pair) is represented by `$B`. For each of these (special tokens and sentences), we also specify the corresponding token type ID after a colon.
The classic BERT template is thus defined as follows:
```python
tokenizer.post_processor = processors.TemplateProcessing(
single=f"[CLS]:0 $A:0 [SEP]:0",
pair=f"[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1",
special_tokens=[("[CLS]", cls_token_id), ("[SEP]", sep_token_id)],
)
```
Note that we need to pass along the IDs of the special tokens, so the tokenizer can properly convert them to their IDs.
Once this is added, going back to our previous example will give:
```python
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
```
```python out
['[CLS]', 'let', "'", 's', 'test', 'this', 'tok', '##eni', '##zer', '.', '[SEP]']
```
And on a pair of sentences, we get the proper result:
```python
encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences.")
print(encoding.tokens)
print(encoding.type_ids)
```
```python out
['[CLS]', 'let', "'", 's', 'test', 'this', 'tok', '##eni', '##zer', '...', '[SEP]', 'on', 'a', 'pair', 'of', 'sentences', '.', '[SEP]']
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
```
We've almost finished building this tokenizer from scratch -- the last step is to include a decoder:
```python
tokenizer.decoder = decoders.WordPiece(prefix="##")
```
Let's test it on our previous `encoding`:
```python
tokenizer.decode(encoding.ids)
```
```python out
"let's test this tokenizer... on a pair of sentences."
```
Great! We can save our tokenizer in a single JSON file like this:
```python
tokenizer.save("tokenizer.json")
```
We can then reload that file in a `Tokenizer` object with the `from_file()` method:
```python
new_tokenizer = Tokenizer.from_file("tokenizer.json")
```
To use this tokenizer in 🤗 Transformers, we have to wrap it in a `PreTrainedTokenizerFast`. We can either use the generic class or, if our tokenizer corresponds to an existing model, use that class (here, `BertTokenizerFast`). If you apply this lesson to build a brand new tokenizer, you will have to use the first option.
To wrap the tokenizer in a `PreTrainedTokenizerFast`, we can either pass the tokenizer we built as a `tokenizer_object` or pass the tokenizer file we saved as `tokenizer_file`. The key thing to remember is that we have to manually set all the special tokens, since that class can't infer from the `tokenizer` object which token is the mask token, the `[CLS]` token, etc.:
```python
from transformers import PreTrainedTokenizerFast
wrapped_tokenizer = PreTrainedTokenizerFast(
tokenizer_object=tokenizer,
# tokenizer_file="tokenizer.json", # You can load from the tokenizer file, alternatively
unk_token="[UNK]",
pad_token="[PAD]",
cls_token="[CLS]",
sep_token="[SEP]",
mask_token="[MASK]",
)
```
If you are using a specific tokenizer class (like `BertTokenizerFast`), you will only need to specify the special tokens that are different from the default ones (here, none):
```python
from transformers import BertTokenizerFast
wrapped_tokenizer = BertTokenizerFast(tokenizer_object=tokenizer)
```
You can then use this tokenizer like any other 🤗 Transformers tokenizer. You can save it with the `save_pretrained()` method, or upload it to the Hub with the `push_to_hub()` method.
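For example, a minimal sketch (the directory and repository names are hypothetical):

```python
# Save the wrapped tokenizer locally (writes tokenizer.json, tokenizer_config.json, special_tokens_map.json, ...)
wrapped_tokenizer.save_pretrained("my-wordpiece-tokenizer")

# Or share it on the Hub (requires being logged in, e.g. with `huggingface-cli login`)
# wrapped_tokenizer.push_to_hub("my-wordpiece-tokenizer")
```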
Now that we've seen how to build a WordPiece tokenizer, let's do the same for a BPE tokenizer. We'll go a bit faster since you know all the steps, and only highlight the differences.
## Building a BPE tokenizer from scratch[[building-a-bpe-tokenizer-from-scratch]]
Let's now build a GPT-2 tokenizer. Like for the BERT tokenizer, we start by initializing a `Tokenizer` with a BPE model:
```python
tokenizer = Tokenizer(models.BPE())
```
Also like for BERT, we could initialize this model with a vocabulary if we had one (we would need to pass the `vocab` and `merges` in this case), but since we will train from scratch, we don't need to do that. We also don't need to specify an `unk_token` because GPT-2 uses byte-level BPE, which doesn't require it.
GPT-2 does not use a normalizer, so we skip that step and go directly to the pre-tokenization:
```python
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
```
The option we added to `ByteLevel` here is to not add a space at the beginning of a sentence (which is the default otherwise). We can have a look at the pre-tokenization of an example text like before:
```python
tokenizer.pre_tokenizer.pre_tokenize_str("Let's test pre-tokenization!")
```
```python out
[('Let', (0, 3)), ("'s", (3, 5)), ('Ġtest', (5, 10)), ('Ġpre', (10, 14)), ('-', (14, 15)),
('tokenization', (15, 27)), ('!', (27, 28))]
```
Next is the model, which needs training. For GPT-2, the only special token is the end-of-text token:
```python
trainer = trainers.BpeTrainer(vocab_size=25000, special_tokens=["<|endoftext|>"])
tokenizer.train_from_iterator(get_training_corpus(), trainer=trainer)
```
Like with the `WordPieceTrainer`, as well as the `vocab_size` and `special_tokens`, we can specify the `min_frequency` if we want to, or if we have an end-of-word suffix (like `</w>`), we can set it with `end_of_word_suffix`.
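As a hedged illustration of those options (the values are arbitrary, and you would only set `end_of_word_suffix` if you actually want that marker):

```python
trainer = trainers.BpeTrainer(
    vocab_size=25000,
    special_tokens=["<|endoftext|>"],
    min_frequency=2,  # ignore symbol pairs that occur only once
    end_of_word_suffix="</w>",  # optional explicit end-of-word marker
)
```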
This tokenizer can also be trained on text files:
```python
tokenizer.model = models.BPE()
tokenizer.train(["wikitext-2.txt"], trainer=trainer)
```
Let's have a look at the tokenization of a sample text:
```python
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
```
```python out
['L', 'et', "'", 's', 'Ġtest', 'Ġthis', 'Ġto', 'ken', 'izer', '.']
```
We apply the byte-level post-processing for the GPT-2 tokenizer as follows:
```python
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
```
The `trim_offsets = False` option indicates to the post-processor that we should leave the offsets of tokens that begin with 'Ġ' as they are: this way the start of the offsets will point to the space before the word, not the first character of the word (since the space is technically part of the token). Let's have a look at the result with the text we just encoded, where `'Ġtest'` is the token at index 4:
```python
sentence = "Let's test this tokenizer."
encoding = tokenizer.encode(sentence)
start, end = encoding.offsets[4]
sentence[start:end]
```
```python out
' test'
```
Finally, we add a byte-level decoder:
```python
tokenizer.decoder = decoders.ByteLevel()
```
and we can double-check it works properly:
```python
tokenizer.decode(encoding.ids)
```
```python out
"Let's test this tokenizer."
```
Great! Now that we're done, we can save the tokenizer like before, and wrap it in a `PreTrainedTokenizerFast` or `GPT2TokenizerFast` if we want to use it in 🤗 Transformers:
```python
from transformers import PreTrainedTokenizerFast
wrapped_tokenizer = PreTrainedTokenizerFast(
tokenizer_object=tokenizer,
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
)
```
or:
```python
from transformers import GPT2TokenizerFast
wrapped_tokenizer = GPT2TokenizerFast(tokenizer_object=tokenizer)
```
As the last example, we'll show you how to build a Unigram tokenizer from scratch.
## Building a Unigram tokenizer from scratch[[building-a-unigram-tokenizer-from-scratch]]
Let's now build an XLNet tokenizer. Like for the previous tokenizers, we start by initializing a `Tokenizer` with a Unigram model:
```python
tokenizer = Tokenizer(models.Unigram())
```
Again, we could initialize this model with a vocabulary if we had one.
For the normalization, XLNet uses a few replacements (which come from SentencePiece):
```python
from tokenizers import Regex
tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Replace("``", '"'),
normalizers.Replace("''", '"'),
normalizers.NFKD(),
normalizers.StripAccents(),
normalizers.Replace(Regex(" {2,}"), " "),
]
)
```
This replaces <code>``</code> and <code>''</code> with <code>"</code> and any sequence of two or more spaces with a single space, as well as removing the accents in the texts to tokenize.
The pre-tokenizer to use for any SentencePiece tokenizer is `Metaspace`:
```python
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace()
```
We can have a look at the pre-tokenization of an example text like before:
```python
tokenizer.pre_tokenizer.pre_tokenize_str("Let's test the pre-tokenizer!")
```
```python out
[("▁Let's", (0, 5)), ('▁test', (5, 10)), ('▁the', (10, 14)), ('▁pre-tokenizer!', (14, 29))]
```
Next is the model, which needs training. XLNet has quite a few special tokens:
```python
special_tokens = ["<cls>", "<sep>", "<unk>", "<pad>", "<mask>", "<s>", "</s>"]
trainer = trainers.UnigramTrainer(
vocab_size=25000, special_tokens=special_tokens, unk_token="<unk>"
)
tokenizer.train_from_iterator(get_training_corpus(), trainer=trainer)
```
A very important argument not to forget for the `UnigramTrainer` is the `unk_token`. We can also pass along other arguments specific to the Unigram algorithm, such as the `shrinking_factor` for each step where we remove tokens (defaults to 0.75) or the `max_piece_length` to specify the maximum length of a given token (defaults to 16).
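A hedged version of the trainer with those defaults written out explicitly might look like this:

```python
trainer = trainers.UnigramTrainer(
    vocab_size=25000,
    special_tokens=special_tokens,
    unk_token="<unk>",
    shrinking_factor=0.75,  # the default fraction of the vocabulary kept at each pruning step
    max_piece_length=16,  # the default maximum length of a token
)
```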
This tokenizer can also be trained on text files:
```python
tokenizer.model = models.Unigram()
tokenizer.train(["wikitext-2.txt"], trainer=trainer)
```
Let's have a look at the tokenization of a sample text:
```python
encoding = tokenizer.encode("Let's test this tokenizer.")
print(encoding.tokens)
```
```python out
['▁Let', "'", 's', '▁test', '▁this', '▁to', 'ken', 'izer', '.']
```
A peculiarity of XLNet is that it puts the `<cls>` token at the end of the sentence, with a type ID of 2 (to distinguish it from the other tokens). As a result, it pads on the left. We can deal with all the special tokens and token type IDs with a template, like for BERT, but first we have to get the IDs of the `<cls>` and `<sep>` tokens:
```python
cls_token_id = tokenizer.token_to_id("<cls>")
sep_token_id = tokenizer.token_to_id("<sep>")
print(cls_token_id, sep_token_id)
```
```python out
0 1
```
The template looks like this:
```python
tokenizer.post_processor = processors.TemplateProcessing(
single="$A:0 <sep>:0 <cls>:2",
pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2",
special_tokens=[("<sep>", sep_token_id), ("<cls>", cls_token_id)],
)
```
And we can test it works by encoding a pair of sentences:
```python
encoding = tokenizer.encode("Let's test this tokenizer...", "on a pair of sentences!")
print(encoding.tokens)
print(encoding.type_ids)
```
```python out
['▁Let', "'", 's', '▁test', '▁this', '▁to', 'ken', 'izer', '.', '.', '.', '<sep>', '▁', 'on', '▁', 'a', '▁pair',
'▁of', '▁sentence', 's', '!', '<sep>', '<cls>']
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
```
Finally, we add a `Metaspace` decoder:
```python
tokenizer.decoder = decoders.Metaspace()
```
and we're done with this tokenizer! We can save the tokenizer like before, and wrap it in a `PreTrainedTokenizerFast` or `XLNetTokenizerFast` if we want to use it in 🤗 Transformers. One thing to note when using `PreTrainedTokenizerFast` is that on top of the special tokens, we need to tell the 🤗 Transformers library to pad on the left:
```python
from transformers import PreTrainedTokenizerFast
wrapped_tokenizer = PreTrainedTokenizerFast(
tokenizer_object=tokenizer,
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
cls_token="<cls>",
sep_token="<sep>",
mask_token="<mask>",
padding_side="left",
)
```
Or alternatively:
```python
from transformers import XLNetTokenizerFast
wrapped_tokenizer = XLNetTokenizerFast(tokenizer_object=tokenizer)
```
Now that you have seen how the various building blocks are used to build existing tokenizers, you should be able to write any tokenizer you want with the 🤗 Tokenizers library and be able to use it in 🤗 Transformers.
| course/chapters/en/chapter6/8.mdx/0 | {
"file_path": "course/chapters/en/chapter6/8.mdx",
"repo_id": "course",
"token_count": 7666
} | 118 |
# How to write a good issue[[how-to-write-a-good-issue]]
<CourseFloatingBanner chapter={8}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter8/section5.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter8/section5.ipynb"},
]} />
When you encounter something that doesn't seem right with one of the Hugging Face libraries, you should definitely let us know so we can fix it (the same goes for any open source library, for that matter). If you are not completely certain whether the bug lies in your own code or one of our libraries, the first place to check is the [forums](https://discuss.huggingface.co/). The community will help you figure this out, and the Hugging Face team also closely watches the discussions there.
<Youtube id="_PAli-V4wj0"/>
When you are sure you have a bug in your hand, the first step is to build a minimal reproducible example.
## Creating a minimal reproducible example[[creating-a-minimal-reproducible-example]]
It's very important to isolate the piece of code that produces the bug, as no one in the Hugging Face team is a magician (yet), and they can't fix what they can't see. A minimal reproducible example should, as the name indicates, be reproducible. This means that it should not rely on any external files or data you may have. Try to replace the data you are using with some dummy values that look like your real ones and still produce the same error.
<Tip>
🚨 Many issues in the 🤗 Transformers repository are unsolved because the data used to reproduce them is not accessible.
</Tip>
Once you have something that is self-contained, you can try to reduce it to even fewer lines of code, building what we call a _minimal reproducible example_. While this requires a bit more work on your side, you will almost be guaranteed to get help and a fix if you provide a nice, short bug reproducer.
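As a purely hypothetical illustration of the shape such a reproducer can take (the checkpoint and the failing call here are placeholders, not a real bug):

```python
from transformers import AutoTokenizer

# Dummy input instead of private data; everything needed to trigger the (hypothetical) error is in this file
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
batch = tokenizer(["a short dummy sentence"], padding=True, return_tensors="pt")
print(batch["input_ids"].shape)  # the line where the unexpected behavior shows up
```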
If you feel comfortable enough, go inspect the source code where your bug happens. You might find a solution to your problem (in which case you can even suggest a pull request to fix it), but more generally, this can help the maintainers better understand the source when they read your report.
## Filling out the issue template[[filling-out-the-issue-template]]
When you file your issue, you will notice there is a template to fill out. We will follow the one for [🤗 Transformers issues](https://github.com/huggingface/transformers/issues/new/choose) here, but the same kind of information will be required if you report an issue in another repository. Don't leave the template blank: taking the time to fill it in will maximize your chances of getting an answer and solving your problem.
In general, when filing an issue, always stay courteous. This is an open source project, so you are using free software, and no one has any obligation to help you. You may include what you feel is justified criticism in your issue, but then the maintainers may very well take it badly and not be in a rush to help you. Make sure you read the [code of conduct](https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md) of the project.
### Including your environment information[[including-your-environment-information]]
🤗 Transformers provides a utility to get all the information we need about your environment. Just type the following in your terminal:
```
transformers-cli env
```
and you should get something like this:
```out
Copy-and-paste the text below in your GitHub issue and FILL OUT the two last points.
- `transformers` version: 4.12.0.dev0
- Platform: Linux-5.10.61-1-MANJARO-x86_64-with-arch-Manjaro-Linux
- Python version: 3.7.9
- PyTorch version (GPU?): 1.8.1+cu111 (True)
- Tensorflow version (GPU?): 2.5.0 (True)
- Flax version (CPU?/GPU?/TPU?): 0.3.4 (cpu)
- Jax version: 0.2.13
- JaxLib version: 0.1.65
- Using GPU in script?: <fill in>
- Using distributed or parallel set-up in script?: <fill in>
```
You can also add a `!` at the beginning of the `transformers-cli env` command to execute it from a notebook cell, and then copy and paste the result at the beginning of your issue.
### Tagging people[[tagging-people]]
Tagging people by typing an `@` followed by their GitHub handle will send them a notification so they will see your issue and might reply quicker. Use this with moderation, because the people you tag might not appreciate being notified if it's something they have no direct link to. If you have looked at the source files related to your bug, you should tag the last person that made changes at the line you think is responsible for your problem (you can find this information by looking at said line on GitHub, selecting it, then clicking "View git blame").
Otherwise, the template offers suggestions of people to tag. In general, never tag more than three people!
### Including a reproducible example[[including-a-reproducible-example]]
If you have managed to create a self-contained example that produces the bug, now is the time to include it! Type a line with three backticks followed by `python`, like this:
```
```python
```
then paste in your minimal reproducible example and type a new line with three backticks. This will ensure your code is properly formatted.
If you didn't manage to create a reproducible example, explain in clear steps how you got to your issue. Include a link to a Google Colab notebook where you got the error if you can. The more information you share, the better able the maintainers will be to reply to you.
In all cases, you should copy and paste the whole error message you are getting. If you're working in Colab, remember that some of the frames may be automatically collapsed in the stack trace, so make sure you expand them before copying. Like with the code sample, put that error message between two lines with three backticks, so it's properly formatted.
### Describing the expected behavior[[describing-the-expected-behavior]]
Explain in a few lines what you expected to get, so that the maintainers get a full grasp of the problem. This part is generally pretty obvious, so it should fit in one sentence, but in some cases you may have a lot to say.
## And then what?[[and-then-what]]
Once your issue is filed, make sure to quickly check everything looks okay. You can edit the issue if you made a mistake, or even change its title if you realize the problem is different from what you initially thought.
There is no point pinging people if you don't get an answer. If no one helps you in a few days, it's likely that no one could make sense of your problem. Don't hesitate to go back to the reproducible example. Can you make it shorter and more to the point? If you don't get an answer in a week, you can leave a message gently asking for help, especially if you've edited your issue to include more information on the problem.
| course/chapters/en/chapter8/5.mdx/0 | {
"file_path": "course/chapters/en/chapter8/5.mdx",
"repo_id": "course",
"token_count": 1791
} | 119 |
- title: 0. Setup
sections:
- local: chapter0/1
title: Introducción
- title: 1. Modelos de Transformadores
sections:
- local: chapter1/1
title: Introducción
- local: chapter1/2
title: Procesamiento de Lenguaje Natural
- local: chapter1/3
title: Transformadores, ¿qué pueden hacer?
- local: chapter1/4
title: ¿Cómo funcionan los Transformadores?
- local: chapter1/5
title: Modelos de codificadores
- local: chapter1/6
title: Modelos de decodificadores
- local: chapter1/7
title: Modelos secuencia a secuencia
- local: chapter1/8
title: Sesgos y limitaciones
- local: chapter1/9
title: Resumen
- local: chapter1/10
title: Quiz de final de capítulo
quiz: 1
- title: 2. Usando Transformers 🤗
sections:
- local: chapter2/4
title: Tokenizadores
- local: chapter2/5
title: Manejando Secuencias Múltiples
- local: chapter2/6
title: Poniendo todo junto
- local: chapter2/7
    title: ¡Has completado el uso básico!
- local: chapter2/8
title: Quiz de final de capítulo
quiz: 2
- title: 3. Ajuste (fine-tuning) de un modelo preentrenado
sections:
- local: chapter3/1
title: Introducción
- local: chapter3/2
title: Procesamiento de los datos
- local: chapter3/3
title: Ajuste de un modelo con la API Trainer
- local: chapter3/3_tf
title: Ajuste de un modelo con Keras
- local: chapter3/4
title: Entrenamiento completo
- local: chapter3/5
title: Ajuste de modelos, ¡hecho!
- local: chapter3/6
title: Quiz de final de capítulo
quiz: 3
- title: 5. La librería 🤗 Datasets
sections:
- local: chapter5/1
title: Introducción
- local: chapter5/2
title: ¿Y si mi dataset no está en el Hub?
- local: chapter5/3
title: Es momento de subdividir
- local: chapter5/4
title: ¿Big data? 🤗 ¡Datasets al rescate!
- local: chapter5/5
title: Crea tu propio dataset
- local: chapter5/6
title: Búsqueda semántica con FAISS
- local: chapter5/7
title: 🤗 Datasets, ¡listo!
- local: chapter5/8
title: Quiz de final de capítulo
quiz: 5
- title: 6. La librería 🤗 Tokenizers
sections:
- local: chapter6/1
title: Introducción
- local: chapter6/2
title: Entrenar un nuevo tokenizador a partir de uno existente
- local: chapter6/3
title: Los poderes especiales de los Tokenizadores Rápidos (Fast tokenizers)
- local: chapter6/3b
title: Tokenizadores Rápidos en un Pipeline de Question-Answering
- local: chapter6/4
title: Normalización y pre-tokenización
- local: chapter6/5
title: Tokenización por Codificación Byte-Pair
- local: chapter6/6
title: Tokenización WordPiece
- local: chapter6/7
title: Tokenización Unigram
- local: chapter6/8
title: Construir un tokenizador, bloque por bloque
- local: chapter6/9
title: Tokenizadores, listo!
- local: chapter6/10
title: Quiz de final de capítulo
    quiz: 6
- title: 8. ¿Cómo solicitar ayuda?
sections:
- local: chapter8/1
title: Introducción
- local: chapter8/2
title: ¿Qué hacer cuando se produce un error?
- title: Glosario
sections:
- local: glossary/1
title: Glosario
| course/chapters/es/_toctree.yml/0 | {
"file_path": "course/chapters/es/_toctree.yml",
"repo_id": "course",
"token_count": 1282
} | 120 |
<FrameworkSwitchCourse {fw} />
<!-- DISABLE-FRONTMATTER-SECTIONS -->
# Quiz de final de capítulo
<CourseFloatingBanner
chapter={2}
classNames="absolute z-10 right-0 top-0"
/>
### 1. ¿Cuál es el orden del pipeline de modelado del lenguaje?
<Question
choices={[
{
text: "Primero, el modelo que maneja el texto y devuelve las peticiones sin procesar. El tokenizador luego da sentido a estas predicciones y las convierte nuevamente en texto cuando es necesario.",
explain: "¡El modelo no puede entender texto! El tokenizador primero debe tokenizar el texto y convertirlo a IDs para que así sea comprensible por el modelo."
},
{
text: "Primero, el tokenizador, que maneja el texto y regresa IDs. El modelo maneja estos IDs y produce una predicción, la cual puede ser algún texto.",
explain: "La predicción del modelo no puede ser texto de forma directa. ¡El tokenizador tiene que ser usado de tal forma que convierta la predicción de vuelta a texto!"
},
{
text: "El tokenizador maneja texto y regresa IDs. El modelo maneja estos IDs y produce una predicción. El tokenizador puede luego ser usado de nuevo para convertir estas predicciones de vuelta a texto.",
explain: "¡Correcto! El tokenizador puede ser usado tanto para tokenizar como des-tokenizar.",
correct: true
}
]}
/>
### 2. ¿Cuántas dimensiones tiene el tensor producido por el modelo base de Transformer y cuáles son?
<Question
choices={[
{
text: "1: La longitud de secuencia y el tamaño del lote",
explain: "¡Falso! El tensor producido por el modelo tiene una tercer dimensión: tamaño oculto."
},
{
text: "2: La longitud de secuencia y el tamaño oculto",
explain: "¡Falso! All Todos los modelos Transformer manejan lotes, aún con una sola secuencia; lo cual sería un lote de tamaño 1!"
},
{
text: "3: La longitud de secuencia, el tamaño de lote y el tamaño oculto",
explain: "¡Correcto!",
correct: true
}
]}
/>
### 3. ¿Cuál de los siguientes es un ejemplo de tokenización de subpalabras?
<Question
choices={[
{
text: "WordPiece",
explain: "¡Sí ese es un ejemplo de tokenización de subpalabras!",
correct: true
},
{
text: "Tokenización basada en caracteres",
explain: "La tokenización basada en caracteres no es un tipo de tokenización de subpalabras."
},
{
text: "División por espacios en blanco y puntuación",
explain: "¡Ese es un esquema de tokenización basado en palabras!"
},
{
text: "BPE",
explain: "¡Sí ese es un ejemplo de tokenización de subpalabras!",
correct: true
},
{
text: "Unigrama",
explain: "¡Sí ese es un ejemplo de tokenización de subpalabras!",
correct: true
},
{
text: "Ninguno de los anteriores",
explain: "¡Incorrecto!"
}
]}
/>
### 4. ¿Qué es una cabeza del modelo?
<Question
choices={[
{
text: "Un componente de la red de Transformer base que redirecciona los tensores a sus capas correctas",
explain: "¡Incorrecto! No hay tal componente."
},
{
text: "También conocido como el mecanismo de autoatención, adapta la representación de un token de acuerdo a los otros tokens de la secuencia",
explain: "¡Incorrecto! La capa de autoatención contiene \"cabezas\", pero éstas no son cabezas de adaptación."
},
{
text: "Un componente adicional, compuesto usualmente de una o unas pocas capas, para convertir las predicciones del transformador a una salida específica de la tarea",
explain: "Así es, Las cabezas de adaptación, también conocidas simplemente como cabezas, vienen en diferentes formas: cabezas de modelado de lenguaje, cabezas de respuesta a preguntas, cabezas de clasificación de secuencia... ",
correct: true
}
]}
/>
{#if fw === 'pt'}
### 5. ¿Qué es un AutoModel?
<Question
choices={[
{
text: "Un modelo que entrena automáticamente en tus datos",
explain: "Incorrecto. ¿Estás confundiendo esto con nuestro producto <a href='https://huggingface.co/autotrain'>AutoTrain</a>?"
},
{
text: "Un objeto que devuelve la arquitectura correcta basado en el punto de control",
explain: "Exacto: el <code>AutoModel</code> sólo necesita conocer el punto de control desde el cual inicializar para devolver la arquitectura correcta.",
correct: true
},
{
text: "Un modelo que detecta automáticamente el lenguaje usado por sus entradas para cargar los pesos correctos",
explain: "Incorrecto; aunque algunos puntos de control y modelos son capaces de manejar varios lenguajes, no hay herramientas integradas para la selección automática de punto de control de acuerdo al lenguaje. ¡Deberías dirigirte a <a href='https://huggingface.co/models'>Model Hub</a> para encontrar el mejor punto de control para tu tarea!"
}
]}
/>
{:else}
### 5. ¿Qué es un TFAutoModel?
<Question
choices={[
{
text: "Un modelo que entrena automáticamente en tus datos",
explain: "Incorrecto. ¿Estás confundiendo esto con nuestro producto <a href='https://huggingface.co/autotrain'>AutoTrain</a>?"
},
{
text: "Un objeto que devuelve la arquitectura correcta basado en el punto de control",
explain: "Exacto: el <code>TFAutoModel</code> sólo necesita conocer el punto de control desde el cual inicializar para devolver la arquitectura correcta.",
correct: true
},
{
text: "Un modelo que detecta automáticamente el lenguaje usado por sus entradas para cargar los pesos correctos",
explain: "Incorrecto; aunque algunos puntos de control y modelos son capaces de manejar varios lenguajes, no hay herramientas integradas para la selección automática de punto de control de acuerdo al lenguaje. ¡Deberías dirigirte a <a href='https://huggingface.co/models'>Model Hub</a> para encontrar el mejor punto de control para tu tarea!"
}
]}
/>
{/if}
### 6. ¿Cuáles son las técnicas a tener en cuenta al realizar batching de secuencias de diferentes longitudes juntas?
<Question
choices={[
{
text: "Truncado",
explain: "Sí, el truncamiento es una forma correcta de emparejar secuencias de modo que se ajusten a una forma rectangular. ¿Aunque, es la única?",
correct: true
},
{
text: "Returning tensors",
explain: "Mientras las otras técnicas te permiten devolver tensores rectangulares, returning tensors no es útil cuando se hace batching en secuencias juntas."
},
{
text: "Relleno",
explain: "Sí, el relleno es una forma correcta de emparejar secuencias de modo que se ajusten a una forma rectangular. ¿Aunque, es la única?",
correct: true
},
{
text: "Enmascarado de atención",
explain: "¡Absolutamente! Las máscaras de atención son de primera importancia cuando se manejan secuencias de diferentes longitudes. Sin embargo, no es la única técnica a tener en cuenta.",
correct: true
}
]}
/>
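A modo de ilustración (ejemplo orientativo que no forma parte del cuestionario), así pueden combinarse el relleno, el truncado y la máscara de atención al agrupar secuencias de diferentes longitudes:

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
secuencias = [
    "I've been waiting for a HuggingFace course my whole life.",
    "So have I!",
]
# padding y truncation igualan las longitudes; attention_mask marca qué tokens son relleno
lote = tokenizer(secuencias, padding=True, truncation=True, return_tensors="pt")
print(lote["input_ids"].shape)
print(lote["attention_mask"])
```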
### 7. ¿Cuál es el propósito de aplicar una función SoftMax a los logits producidos por un modelo de clasificación de secuencias?
<Question
choices={[
{
text: "Suaviza los logits para que sean más fiables.",
explain: "No, la función SoftMax no afecta en la fiabilidad de los resultados."
},
{
text: "Aplica un límite inferior y superior de modo que sean comprensibles.",
explain: "¡Correcto! Los valores resultantes están limitados entre 0 y 1. Aunque, no es la única razón por la cual usamos una función SoftMax.",
correct: true
},
{
text: "La suma total de la salida es entonces 1, dando como resultado una posible interpretación probabilística.",
explain: "¡Correcto! Aunque, esa no es la única razón por la que usamos una función SoftMax.",
correct: true
}
]}
/>
### 8. ¿En qué método se centra la mayor parte de la API del tokenizador?
<Question
choices={[
{
text: "<code>encode</code>, ya que puede codificar texto en IDs e IDs en predicciones",
explain: "¡Incorrecto! Aunque el método <code>encode</code> existe en los tokenizadores, no existe en los modelos."
},
{
text: "Llamar al objeto tokenizador directamente.",
explain: "¡Exactamente! El método <code>__call__</code> del tokenizador es un método muy poderoso el cual puede manejar casi cualquier cosa.También es el método usado para recuperar las predicciones de un modelo.",
correct: true
},
{
text: "<code>pad</code>",
explain: "¡Incorrecto! El relleno es muy útil, pero es solo una parte de la API tokenizador."
},
{
text: "<code>tokenize</code>",
explain: "El método <code>tokenize</code> es posiblemente uno de los métodos más útiles, pero no es el núcleo de la API tokenizador."
}
]}
/>
### 9. ¿Qué contiene la variable `result` en este código de ejemplo?
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
result = tokenizer.tokenize("Hello!")
```
<Question
choices={[
{
text: "Una lista de strings, cada string es un token",
explain: "¡Por supuesto! ¡Convierte esto a IDs, y los envía a los modelos!",
correct: true
},
{
text: "Una lista de IDs",
explain: "Incorrecto; ¡para eso están los métodos <code>__call__</code> o <code>convert_tokens_to_ids</code>!"
},
{
text: "Una cadena que contiene todos los tokens",
explain: "Esto sería subóptimo, ya que el objetivo es dividir la cadena en varios tokens."
}
]}
/>
{#if fw === 'pt'}
### 10. ¿Hay algo mal con el siguiente código?
```py
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = AutoModel.from_pretrained("gpt2")
encoded = tokenizer("Hey!", return_tensors="pt")
result = model(**encoded)
```
<Question
choices={[
{
text: "No, parece correcto.",
explain: "Desafortunadamente, acoplar un modelo con un tokenizador que fue entrenado con un punto de control distinto raramente es una buena idea. El modelo no fue entrenado para dar sentido a la salida de este tokenizador, así la salida del modelo (¡si es que puede correr!) no tendrá ningún sentido."
},
{
text: "El tokenizador y el modelo siempre deben ser del mismo punto de control.",
explain: "¡Correcto!",
correct: true
},
{
text: "Es una buena práctica rellenar y truncar con el tokenizador ya que cada entrada es un lote.",
explain: "Es cierto que cada entrada de modelo necesita ser un lote. Sin embargo, truncar o rellenar esta secuencia no necesariamente hace sentido ya que sólo hay una, y esas son técnicas para juntar una lista de oraciones."
}
]}
/>
{:else}
### 10. ¿Hay algo mal con el siguiente código?
```py
from transformers import AutoTokenizer, TFAutoModel
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = TFAutoModel.from_pretrained("gpt2")
encoded = tokenizer("Hey!", return_tensors="pt")
result = model(**encoded)
```
<Question
choices={[
{
text: "No, parece correcto.",
explain: "Desafortunadamente, acoplar un modelo con un tokenizador que fue entrenado con un punto de control distinto raramente es una buena idea. El modelo no fue entrenado para dar sentido a la salida de este tokenizador, así la salida del modelo (¡si es que puede correr!) no tendrá ningún sentido."
},
{
text: "El tokenizador y el modelo siempre deben ser del mismo punto de control.",
explain: "¡Correcto!",
correct: true
},
{
text: "Es una buena práctica rellenar y truncar con el tokenizador ya que cada entrada es un lote.",
explain: "Es cierto que cada entrada de modelo necesita ser un lote. Sin embargo, truncar o rellenar esta secuencia no necesariamente hace sentido ya que sólo hay una, y esas son técnicas para juntar una lista de oraciones."
}
]}
/>
{/if}
| course/chapters/es/chapter2/8.mdx/0 | {
"file_path": "course/chapters/es/chapter2/8.mdx",
"repo_id": "course",
"token_count": 4729
} | 121 |
# Introducción[[introduction]]
<CourseFloatingBanner
chapter={6}
classNames="absolute z-10 right-0 top-0"
/>
En el [Capítulo 3](/course/chapter3), revisamos cómo hacer fine-tuning a un modelo para una tarea dada. Cuando hacemos eso, usamos el mismo tokenizador con el que el modelo fue entrenado -- pero, ¿qué hacemos cuando queremos entrenar un modelo desde cero? En estos casos, usar un tokenizador que fue entrenado en un corpus de otro dominio u otro lenguaje típicamente no es lo más óptimo. Por ejemplo, un tokenizador entrenado en un corpus en inglés tendrá un desempeño pobre en un corpus de textos en japonés, porque el uso de los espacios y de la puntuación es muy diferente entre los dos lenguajes.
En este capítulo, aprenderás cómo entrenar un tokenizador completamente nuevo en un corpus, para que luego pueda ser usado para pre-entrenar un modelo de lenguaje. Todo esto será hecho con la ayuda de la librería [🤗 Tokenizers](https://github.com/huggingface/tokenizers), la cual provee tokenizadores rápidos (_fast tokenizers_) en la librería [🤗 Transformers](https://github.com/huggingface/transformers). Miraremos de cerca todas las características que provee la librería y exploraremos cómo los tokenizadores rápidos (fast tokenizers) difieren de las versiones "lentas".
Los temas a cubrir incluyen:
* Cómo entrenar un tokenizador nuevo similar a los usados por un checkpoint dado en un nuevo corpus de texto.
* Las características especiales de los tokenizador rápidos ("fast tokenizers").
* Las diferencias entre los tres principales algoritmos de tokenización usados en PLN hoy.
* Cómo construir un tokenizador desde cero con la librería 🤗 Tokenizers y entrenarlo en datos.
Las técnicas presentadas en este capítulo te prepararán para la sección en el [Capítulo 7](/course/chapter7/6) donde estudiaremos cómo crear un modelo de lenguaje para Código Fuente en Python. Comenzaremos en primer lugar revisando qué significa "entrenar" un tokenizador. | course/chapters/es/chapter6/1.mdx/0 | {
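Como anticipo, aquí hay un boceto orientativo (los detalles se cubren más adelante en este capítulo) de cómo entrenar un tokenizador nuevo a partir de uno existente con `train_new_from_iterator()`; el corpus de ejemplo y el nombre de la carpeta son hipotéticos:

```py
from transformers import AutoTokenizer

# Corpus de juguete; en la práctica se usa un iterador sobre un corpus grande
corpus = ["def add_numbers(a, b):", "    return a + b"]

old_tokenizer = AutoTokenizer.from_pretrained("gpt2")
new_tokenizer = old_tokenizer.train_new_from_iterator(corpus, vocab_size=52000)
new_tokenizer.save_pretrained("mi-nuevo-tokenizador")
```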
"file_path": "course/chapters/es/chapter6/1.mdx",
"repo_id": "course",
"token_count": 696
} | 122 |
<div dir="rtl">
# مقدمه
به دورهی آموزشی هاگینگفیس خوش آمدید! این مقدمه شما را در طی مراحل راهاندازی محیط کار راهنمایی میکند. اگر تازه این دوره را شروع کردهاید، پیشنهاد میکنیم ابتدا نگاهی به [فصل اول](/course/chapter1) بیاندازید و سپس به این بخش بازگشته تا محیط کاری را راهاندازی کنید و بتوانید خودتان کد را اجرا کنید.
همه کتابخانههایی که در این دورهی آموزشی استفاده خواهیم کرد، پکیجهای پایتون هستند. در این بخش میبینیم که چگونه باید محیط کار پایتون را راهاندازی نموده و کتابخانههای مورد نیاز را نصب کنید.
ما دو شیوه راهاندازی محیط کار، یکی استفاده از نوتبوک کولَب و دیگری استفاده از محیط مجازی پایتون را نشان خواهیم داد. میتوانید هرکدام را که میخواهید انتخاب کنید. اگر تازهکارها هستید، توصیه مؤکد داریم که از نوتبوک کولَب استفاده کنید.
توجه کنید که به محیط ویندوز نخواهیم پرداخت. اگر از ویندوز استفاده میکنید توصیه میکنیم از نوتبوکهای کولَب استفاده کنید. اگر از سیستمعامل مک یا یکی از توزیعهای لینوکس استفاده میکنید میتوانید هرکدام از روشهایی که در اینجا ارائه میکنیم را دنبال کنید.
برای طی بخش زیادی از این دوره نیاز به حساب کاربری هاگینگفیس دارید. پیشنهاد میکنیم همین الان [حساب خود را بسازید](https://huggingface.co/join).
<h2>استفاده از نوتبوک کولَب گوگل</h2>
استفاده از نوتبوک کولَب سادهترین راه شروع است. در مرورگر خود نوتبوکی جدید باز کرده و بلافاصله شروع به کد زدن کنید!
اگر با کولَب آشنایی ندارید پیشنهاد میکنیم از این [راهنما](https://colab.research.google.com/notebooks/intro.ipynb) استفاده کنید. کولَب به شما امکان استفاده از سختافزارهای شتابدهنده مانند GPU یا TPU میدهد و استفاده از آن برای محاسبات سبک رایگان است.
وقتی که با محیط کاربری کولَب آشنا شدید، نوتبوکی جدید بسازید و مراحل راهاندازی را شروع کنید.
<br/>
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/new_colab.png" alt="An empty colab notebook" width="80%"/>
</div>
<br/>
قدم اول نصب کتابخانههایی است که در این دوره استفاده خواهیم کرد. برای نصب کتابخانهها از `pip` استفاده میکنیم که پکیجمنیجر پایتون است. در فضای نوتبوک، برای اجرای دستورهای سیستمی، کافی است علامت `!` را به ابتدای خط اضافه کنید. برای نصب کتابخانه ترنسفورمرهای هاگینگفیس این دستور را اجرا کنید:
<div dir="ltr">
```
!pip install transformers
```
</div>
برای اطمینان از نصب صحیح این پکیج، آن را ایمپورت کنید:
<div dir="ltr">
```
import transformers
```
</div>
<br/>
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/install.gif" alt="A gif showing the result of the two commands above: installation and import" width="80%"/>
</div>
<br/>
این دستور نسخهای بسیار کم حجم از ترنسفورمرهای هاگینگفیس را نصب میکند بدون آنکه فریمورک یادگیری ماشین مشخصی مانند پایتورچ یا تنسورفلو را اضافه کند. با توجه به اینکه ما از بسیاری از قابلیتهای مختلف این کتابخانه استفاده خواهیم کرد، پیشنهاد میکنیم نسخه توسعهی این کتابخانه، که حاوی تمام پکیجهای وابسته برای تقریبا همه مسائل قابل تصور است، را نصب کنید:
<div dir="ltr">
```
!pip install transformers[sentencepiece]
```
</div>
اجرای این فرمان کمی بیشتر طول میکشد ولی برای طی بقیه دوره نیازی به نصب پکیج دیگری نخواهید داشت!
<h2>استفاده از محیط مجازی پایتون</h2>
اگر ترجیح میدهید از یکی از محیطهای مجازی پایتون استفاده کنید، اولین مرحله نصب پایتون روی سیستمتان است. پیشنهاد میکنیم از این [راهنما](https://realpython.com/installing-python/) استفاده کنید.
اگر پایتون را نصب کردید، میتوانید فرمانهای پایتون را در ترمینال اجرا کنید. قبل از اینکه به سراغ مراحل بعدی بروید، با اجرای دستور `python --version` از نصب صحیح پایتون مطمئن شوید. این دستور، نسخهی پایتون نصب شده روی سیستمتان را نمایش میدهد.
زمانی که فرمانهای پایتون را در ترمینال اجرا می کنید، نسخه "اصلی” پایتون روی سیستم خود را درگیر می کنید. توصیه می کنیم این نسخه را تمیز نگه دارید و هیچ پکیج اضافهای روی آن نصب نکنید، بلکه از آن صرفا برای ایجاد محیطهای مجازی دیگر و برای پروژههای مختلف استفاده کنید. با این روش هر پروژه میتواند وابستگیهای مخصوص به خود را داشتهباشد و دیگر نیازی نیست نگران ناسازگاریهای احتمالی میان پکیجهای نصب شده برای پروژههای مختلف باشید.
این کار در پایتون با استفاده از [<b>محیطهای مجازی</b>](https://docs.python.org/3/tutorial/venv.html) انجام میشود. محیط مجازی، پوشه ای قائم به خود روی فایلسیستم است که محتوی نسخهای مشخص از پایتون به همراه تمام پکیجهای مورد استفاده در پروژهای خاص است. ساخت این پوشه با ابزارهای مختلفی امکانپذیر است. ما در اینجا از ابزار رسمی پایتون به نام [`venv`](https://docs.python.org/3/library/venv.html#module-venv) استفاده میکنیم.
ابتدا پوشهای جدید برای پروژه خود ایجاد کنید. برای مثال پوشهای به نام <b>transformers-course</b> زیر پوشهی خانه خودتان در فایلسیستم بسازید:
<div dir="ltr">
```
mkdir ~/transformers-course
cd ~/transformers-course
```
</div>
درون این پوشه، با استفاده از ماژول `venv` پایتون، محیط مجازی خود را بسازید:
<div dir="ltr">
```
python -m venv .env
```
</div>
حالا میبایست زیر پوشه پروژه شما تنها یک پوشه دیگر به نام <b>.env</b> وجود داشته باشد.
<div dir="ltr">
```
ls -a
. .. .env
```
</div>
برای ورود و خروج از محیط مجازی پروژه خود از اسکریپتهای activate و deactivate استفاده کنید:
<div dir="ltr">
```
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
source .env/bin/deactivate
```
</div>
با اجرای دستور `which python` از فعال شدن محیط مجازی خود اطمینان حاصل کنید. اگر این دستور به آدرس محیط مجازی جدید اشاره کند، با موفقیت این محیط را فعال کردهاید.
<div dir="ltr">
```
which python
/home/<user>/transformers-course/.env/bin/python
```
</div>
<h3>نصب وابستگیها</h3>
مانند آنچه در بخش استفاده از گوگل کولَب گفتیم، اکنون باید پکیجهای موردنیاز برای ادامه دوره را نصب کنید. میتوانید نسخه توسعهی پکیج ترنسفورمرهای هاگینگفیس را با استفاده از پکیجمنیجر `pip` نصب کنید:
<div dir="ltr">
```
pip install "transformers[sentencepiece]"
```
</div>
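در صورت تمایل می‌توانید برای اطمینان بیشتر، نسخه‌ی نصب‌شده‌ی کتابخانه را نیز بررسی کنید (این دستور صرفاً یک مثال پیشنهادی است):
<div dir="ltr">
```
import transformers
print(transformers.__version__)
```
</div>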
شما تمام مراحل راهاندازی را طی کرده و آماده شروع دوره هستید!
</div>
| course/chapters/fa/chapter0/1.mdx/0 | {
"file_path": "course/chapters/fa/chapter0/1.mdx",
"repo_id": "course",
"token_count": 5787
} | 123 |
<!-- DISABLE-FRONTMATTER-SECTIONS -->
# Quiz de fin de chapitre
<CourseFloatingBanner
chapter={1}
classNames="absolute z-10 right-0 top-0"
/>
Ce chapitre a couvert un grand nombre de notions ! Ne vous inquiétez pas si vous n'avez pas compris tous les détails, les chapitres suivants vous aideront à comprendre comment les choses fonctionnent concrètement.
Mais avant d'aller plus loin, prenons un instant pour voir ce que vous avez appris dans ce chapitre !
### 1. Explorez le *Hub* et cherchez le modèle `roberta-large-mnli`. Quelle tâche accomplit-il ?
<Question
choices={[
{
text: "Résumé de texte",
explain: "Regardez à nouveau sur la <a href=\"https://huggingface.co/roberta-large-mnli\">page roberta-large-mnli</a>."
},
{
text: "Classification de texte",
explain: "Pour être plus précis, il classifie si deux phrases sont logiquement liées entre elles parmis trois possibilités (contradiction, neutre, lien). Il s'agit d'une tâche aussi appelée <em>inference de langage naturel</em>.",
correct: true
},
{
text: "Génération de texte",
explain: "Regardez à nouveau sur la <a href=\"https://huggingface.co/roberta-large-mnli\">page roberta-large-mnli</a>."
}
]}
/>
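À titre purement indicatif (exemple non tiré du cours), on peut essayer ce modèle avec un pipeline de classification de texte :

```py
from transformers import pipeline

classifier = pipeline("text-classification", model="roberta-large-mnli")
# Pour un vrai cas d'usage NLI, on fournit une prémisse et une hypothèse ;
# ici on les concatène simplement à titre de démonstration.
classifier("A soccer game with multiple males playing. Some men are playing a sport.")
```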
### 2. Que renvoie le code suivant ?
```py
from transformers import pipeline
ner = pipeline("ner", grouped_entities=True)
ner(
"My name is Sylvain and I work at Hugging Face in Brooklyn."
) # Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.
```
<Question
choices={[
{
text: "Il renvoie les scores de classification pour cette phrase, avec les labels \"positive\" ou \"negative\".",
explain: "Cela correspondrait au pipeline <code>d'analyse de sentiment</code> (<i>sentiment-analysis</i> dans la documentation d'Hugging-Face)."
},
{
text: "Il renvoie un texte généré qui complète cette phrase.",
explain: "Cela correspondrait au pipeline de <code>génération de texte</code> (<i>text-generation</i> dans la documentation d'Hugging-Face)."
},
{
text: "Il renvoie les entités nommées dans cette phrase, telles que les personnes, les organisations ou lieux.",
explain: "De plus, avec <code>grouped_entities=True</code>, cela regroupe les mots appartenant à la même entité, comme par exemple \"Hugging Face\".",
correct: true
}
]}
/>
### 3. Que remplace « ... » dans ce code ?
```py
from transformers import pipeline
filler = pipeline("fill-mask", model="bert-base-cased")
result = filler("...")
```
<Question
choices={[
{
text: "This <mask> has been waiting for you. # Ce <mask> vous attend.",
explain: "Regardez la description du modèle <code>bert-base-cased</code> et essayez de trouver votre erreur."
},
{
text: "This [MASK] has been waiting for you. # Ce [MASK] vous attend.",
explain: "Le modèle utilise [MASK] comme mot-masque.",
correct: true
},
{
text: "This man has been waiting for you. # Cet homme vous attend.",
explain: "Ce pipeline permet de remplacer les mot manquants donc il a besoin d'un mot-masque."
}
]}
/>
### 4. Pourquoi ce code ne fonctionne-t-il pas ?
```py
from transformers import pipeline
classifier = pipeline("zero-shot-classification")
result = classifier(
"This is a course about the Transformers library"
) # C'est un cours sur la bibliothèque Transformers
```
<Question
choices={[
{
text: "Ce pipeline nécessite que des étiquettes soient données pour classifier ce texte.",
explain: "Le code doit inclure <code>candidate_labels=[...]</code>.",
correct: true
},
{
text: "Ce pipeline nécessite que des phrases soient données, pas juste une phrase.",
explain: "Bien que ce pipeline puisse prendre une liste de phrases à traiter (comme tous les autres pipelines)."
},
{
text: "La bibliothèque 🤗 <i>Transformers</i> est cassée, comme d'habitude.",
explain: "Nous n'avons aucun commentaire pour cette réponse !",
},
{
text: "Ce pipeline nécessite des phrases plus longues, celle-ci est trop courte.",
explain: "Notez que si un texte est très long, il est tronqué par le pipeline."
}
]}
/>
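Pour mémoire, voici à quoi pourrait ressembler une version corrigée (exemple indicatif), avec des étiquettes candidates fournies :

```py
from transformers import pipeline

classifier = pipeline("zero-shot-classification")
result = classifier(
    "This is a course about the Transformers library",
    candidate_labels=["education", "politics", "business"],
)
```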
### 5. Que signifie « apprentissage par transfert » ?
<Question
choices={[
{
text: "Transférer les connaissances d'un modèle pré-entraîné vers un nouveau modèle en entraînant ce second modèle sur le même jeu de données.",
explain: "Non, cela donnerait deux versions du même modèle."
},
{
text: "Transférer les connaissances d'un modèle pré-entraîné vers un nouveau modèle en initialisant ce second modèle avec les poids du premier.",
explain: "Quand le second modèle est entraîné sur une nouvelle tâche, il transfère les connaissances du premier modèle.",
correct: true
},
{
text: "Transférer les connaissances d'un modèle pré-entraîné vers un nouveau modèle en construisant le second modèle avec la même architecture que le premier.",
explain: "L'architecture correspond uniquement à la structure du modèle, pas à ses connaissances. Il n'y a donc pas de connaissances à transférer dans ce cas.",
}
]}
/>
### 6. Vrai ou faux ? Un modèle de langage n'a généralement pas besoin d'étiquettes pour son pré-entraînement.
<Question
choices={[
{
text: "Vrai",
explain: "Le pré-entraînement est <em>autosupervisé</em>, ce qui signifie que les étiquettes sont créées automatiquement à partir des données d'entrée (comme prédire le mot suivant ou remplacer des mots masqués).",
correct: true
},
{
text: "Faux",
explain: "Ce n'est pas la bonne réponse."
}
]}
/>
### 7. Sélectionnez la phrase qui décrit le mieux les termes « modèle », « architecture » et « poids ».
<Question
choices={[
{
text: "Si un modèle est un bâtiment, son architecture est le plan de construction et les poids représentent les personnes qui vivent dedans.",
explain: "Si on suit cette métaphore, les poids seraient plutôt les briques et les matériaux utilisés pour construire le bâtiment."
},
{
text: "Une architecture est une carte pour construire un modèle et les poids sont les villes représentées sur la carte.",
explain: "Le problème avec cette métaphore est que la carte représente une réalité existante (il n'y a qu'une seule ville nommée Paris en France). Pour une architecture donnée, plusieurs poids sont possibles."
},
{
text: "Une architecture est une succession de fonctions mathématiques permettant de construire un modèle et les poids sont les paramètres de ces fonctions.",
explain: "Le même ensemble de fonctions mathématiques peut être utilisé pour construire plusieurs modèles avec différents paramètres (poids).",
correct: true
}
]}
/>
### 8. Parmi ces types de modèles, quel est le plus approprié pour générer du texte à partir d'une instruction (*prompt*) ?
<Question
choices={[
{
text: "Un modèle basé sur l'encodeur",
explain: "Un modèle basé sur l'encodeur génère une représentation de la phrase entière qui est plus adaptée à des tâches de classification."
},
{
text: "Un modèle basé sur le décodeur",
explain: "Les modèles basés sur le décodeur sont bien pour générer du texte à partir d'une instruction.",
correct: true
},
{
text: "Un modèle de séquence-à-séquence",
explain: "Les modèles de séquence-à-séquence sont davantage adaptés aux tâches qui nécessitent de générer des phrases à partir d'un texte donné en entrée, pas un texte généré à partir d'une instruction."
}
]}
/>
### 9. Parmi ces types de modèles, quel est le plus approprié pour le résumé de texte ?
<Question
choices={[
{
text: "Un modèle basé sur l'encodeur",
explain: "Un modèle basé sur l'encodeur génère une représentation de la phrase entière qui est plus adaptée à des tâches de classification.",
},
{
text: "Un modèle basé sur le décodeur",
explain: "Les modèles basés sur le décodeur sont bien pour générer du texte (comme les résumés) mais ils n'ont pas la capacité d'exploiter un contexte comme un texte entier pour en faire un résumé.",
},
{
text: "Un modèle de séquence-à-séquence",
explain: "Les modèles de séquence-à-séquence sont parfaitement adaptés à une tâche de résumé.",
correct: true
}
]}
/>
### 10. Quel type de modèle utiliseriez-vous pour classifier des entrées de texte en fonction de certains labels ?
<Question
choices={[
{
text: "Un modèle basé sur l'encodeur",
explain: "Un modèle basé sur un encodeur génère une représentation de la phrase entière et est donc parfaitement adapté à des tâches de classification.",
correct: true
},
{
text: "Un modèle basé sur le décodeur",
explain: "Les modèles basés sur le décodeur sont bons pour générer des textes et non pour extraire une étiquette d'une phrase.",
},
{
text: "Un modèle de séquence-à-séquence",
explain: "Les modèles de séquence-à-séquence sont davantage adaptés pour des tâches qui nécessitent de générer des phrases à partir d'un texte donné en entrée, non pour extraire une étiquette à partir d'une phrase.",
}
]}
/>
### 11. De quelle source possible peut être le biais observé dans un modèle ?
<Question
choices={[
{
text: "Le modèle est une version <i>finetunée</i> d'un modèle pré-entraîné et il a conservé ses biais.",
explain: "Avec l'apprentissage par transfert, les biais du modèle pré-entraîné perdurent dans le modèle <i>finetuné</i>.",
correct: true
},
{
text: "Le modèle a été entraîné sur des données qui sont biaisées.",
explain: "Ceci représente la source de biais la plus évidente mais n'est pas la seule possible.",
correct: true
},
{
text: "La métrique optimisée lors de l'entraînement du modèle est biaisée.",
explain: "Une source moins évidente est la façon dont le modèle est entraîné. Votre modèle va de façon aveugle optimiser la métrique que vous avez sélectionnée, sans prendre aucun recul.",
correct: true
}
]}
/>
| course/chapters/fr/chapter1/10.mdx/0 | {
"file_path": "course/chapters/fr/chapter1/10.mdx",
"repo_id": "course",
"token_count": 3854
} | 124 |
<FrameworkSwitchCourse {fw} />
<!-- DISABLE-FRONTMATTER-SECTIONS -->
# Quiz de fin de chapitre
<CourseFloatingBanner
chapter={2}
classNames="absolute z-10 right-0 top-0"
/>
### 1. Quel est l'ordre du pipeline de modélisation du langage ?
<Question
choices={[
{
text: " Tout d'abord, le modèle, qui traite le texte et renvoie des prédictions brutes. Puis le <i>tokenizer</i> donne un sens à ces prédictions et les reconvertit en texte si nécessaire.",
explain: " Le modèle ne peut pas comprendre le texte ! Le <i>tokenizer</i> doit d'abord tokeniser le texte et le convertir en identifiants afin qu'il soit compréhensible par le modèle."},
{
text: " Tout d'abord, le <i>tokenizer</i>, qui traite le texte et renvoie des identifiants. Puis le modèle traite ces identifiants et produit une prédiction, qui peut être du texte.",
explain: " La prédiction du modèle ne peut pas être du texte immédiatement. Le <i>tokenizer</i> doit être utilisé afin de reconvertir la prédiction en texte !"},
{
text: " Le <i>tokenizer</i> traite le texte et renvoie des identifiants. Le modèle traite ces identifiants et produit une prédiction. Le <i>tokenizer</i> peut alors être utilisé à nouveau pour reconvertir ces prédictions en texte.",
explain: " Le <i>tokenizer</i> peut être utilisé à la fois pour la tokenisation et la dé-tokénisation.",
correct: true
}
]}
/>
### 2. Combien de dimensions le tenseur produit par le <i>transformer</i> de base possède-t-il et quelles sont-elles ?
<Question
choices={[
{
text: "2: la longueur de la séquence et la taille du batch",
explain: "Le tenseur produit par le modèle possède une troisième dimension : la taille cachée."
},
{
text: "2: la longueur de la séquence et la taille cachée",
explain: "Tous les <i>transformers</i> gèrent les batchs, même avec une seule séquence ce serait une taille de batch de 1 !"
},
{
text: "3: la longueur de la séquence, la taille du batch et la taille cachée.",
explain: "",
correct: true
}
]}
/>
### 3. Lequel des éléments suivants est un exemple de tokenisation en sous-mots ?
<Question
choices={[
{
text: "WordPiece",
explain: "Oui, c'est un exemple de tokenisation en sous-mots !",
correct: true
},
{
text: "La tokenization basée sur les caractères",
explain: "La tokenization basée sur les caractères n’est pas un type de tokenisation en sous-mots."
},
{
text: "Découpage sur les espaces et la ponctuation",
explain: "C’est une tokenisation basée sur les mots !"
},
{
text: "BPE",
explain: "Oui, c'est un exemple de tokenisation en sous-mots !",
correct: true
},
{
text: "Unigram",
explain: "Oui, c'est un exemple de tokenisation en sous-mots !",
correct: true
},
{
text: "Aucune des propositions ci-dessus",
explain: ""
}
]}
/>
### 4. Qu'est-ce qu'une tête de modèle ?
<Question
choices={[
{
text: " Un composant du <i>transformer</i> de base qui redirige les tenseurs vers leurs couches correctes.",
explain: "Il n'y a pas de tel composant."
},
{
text: "Également connu sous le nom de mécanisme d'auto-attention, il adapte la représentation d'un <i>token</i> en fonction des autres <i>tokens</i> de la séquence.",
explain: "La couche d'auto-attention contient des têtes d'attention mais ce ne sont pas des têtes d'adaptation."
},
{
text: "Un composant supplémentaire, généralement constitué d'une ou plusieurs couches, pour convertir les prédictions du <i>transformer</i> en une sortie spécifique à la tâche.",
explain: "Les têtes d'adaptation, aussi appelées simplement têtes, se présentent sous différentes formes : têtes de modélisation du langage, têtes de réponse aux questions, têtes de classification des séquences, etc.",
correct: true
}
]}
/>
{#if fw === 'pt'}
### 5. Qu'est-ce qu'un AutoModel?
<Question
choices={[
{
text: "Un modèle qui s'entraîne automatiquement sur vos données",
explain: "Vous confondez cela avec notre produit <a href='https://huggingface.co/autotrain>AutoTrain</a>"
},
{
text: "Un objet qui renvoie la bonne architecture basée sur le <i>checkpoint</i> .",
explain: "Exactement : <code>AutoModel</code> a seulement besoin de connaître le <i>checkpoint</i> à partir duquel il doit s'initialiser pour retourner à la bonne architecture.",
correct: true
},
{
text: "Un modèle qui détecte automatiquement la langue utilisée pour ses entrées afin de charger les bonnes pondérations.",
explain: "Bien que certains <i>checkpoints</i> et modèles soient capables de gérer plusieurs langues, il n'existe pas d'outils intégrés pour la sélection automatique des <i>checkpoints</i> en fonction de la langue. Vous devez vous rendre sur le <a href='https://huggingface.co/models'>Hub des modèles</a> pour trouver le meilleur <i>checkpoint</i> pour votre tâche !"
}
]}
/>
{:else}
### 5. Qu'est-ce qu'un TFAutoModel ?
<Question
choices={[
{
text: "Un modèle qui s'entraîne automatiquement sur vos données",
explain: "Vous confondez cela avec notre produit <a href='https://huggingface.co/autotrain>AutoTrain</a>"
},
{
text: "Un objet qui renvoie la bonne architecture basée sur le <i>checkpoint</i> .",
explain: "Exactement : <code>TFAutoModel</code> a seulement besoin de connaître le <i>checkpoint</i> à partir duquel il doit s'initialiser pour retourner à la bonne architecture.",
correct: true
},
{
text: "Un modèle qui détecte automatiquement la langue utilisée pour ses entrées afin de charger les bonnes pondérations.",
explain: "Bien que certains <i>checkpoints</i> et modèles soient capables de gérer plusieurs langues, il n'existe pas d'outils intégrés pour la sélection automatique des <i>checkpoints</i> en fonction de la langue. Vous devez vous rendre sur le <a href='https://huggingface.co/models'>Hub des modèles</a> pour trouver le meilleur <i>checkpoint</i> pour votre tâche !"
}
]}
/>
{/if}
### 6. Quelles sont les techniques à connaître lors de la mise en batch de séquences de longueurs différentes ?
<Question
choices={[
{
text: "La troncature",
explain: " La troncature est une façon correcte d'égaliser les séquences pour qu'elles s'inscrivent dans une forme rectangulaire. Mais est-ce la seule ?",
correct: true
},
{
text: "Retourner les tenseurs",
explain: "Alors que les autres techniques vous permettent de renvoyer des tenseurs rectangulaires, retourner les tenseurs n'est pas utile lorsque vous mettez en batch des séquences."
},
{
text: "Le <i>padding</i>",
explain: "Le <i>padding</i> est une façon correcte d'égaliser les séquences pour qu'elles tiennent dans une forme rectangulaire. Mais est-ce le seul moyen ?",
correct: true
},
{
text: "Les masques d'attention ",
explain: "Les masques d'attention sont d'une importance capitale lorsqu'on manipule des séquences de longueurs différentes. Ce n'est cependant pas la seule technique à laquelle il faut faire attention.",
correct: true
}
]}
/>
### 7. Quel est l'intérêt d'appliquer une fonction SoftMax aux logits produits par un modèle de classification de séquences ?
<Question
choices={[
{
text: "Elle adoucit les logits pour qu'ils soient plus fiables.",
explain: "La fonction SoftMax n'affecte pas la fiabilité des résultats."
},
{
text: "Elle applique une limite inférieure et supérieure pour qu'ils soient compréhensibles.",
explain: "Les valeurs résultantes sont comprises entre 0 et 1. Ce n'est cependant pas la seule raison pour laquelle nous utilisons une fonction SoftMax.",
correct: true
},
{
text: "La somme totale des sorties est alors égale à 1, ce qui permet une interprétation probabiliste.",
explain: "Mais ce n'est pas la seule raison pour laquelle nous utilisons une fonction SoftMax.",
correct: true
}
]}
/>
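À titre d'illustration (valeurs fictives), voici l'effet d'une fonction SoftMax appliquée à des logits :

```py
import torch

logits = torch.tensor([[-1.5607, 1.6123]])  # logits fictifs produits par un modèle
predictions = torch.nn.functional.softmax(logits, dim=-1)
print(predictions)           # chaque valeur est comprise entre 0 et 1
print(predictions.sum(-1))   # la somme vaut 1 : interprétation probabiliste possible
```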
### 8. Autour de quelle méthode s'articule la majeure partie de l'API <i>tokenizer</i> ?
<Question
choices={[
{
text: "<code>encode</code>, car elle peut encoder du texte en identifiants et des identifiants en prédictions.",
explain: "Bien que la méthode <code>encode</code> existe sur les <i>tokenizer</i>, elle n'existe pas sur les modèles."
},
{
text: "Appeler directement l'objet tokenizer",
explain: "La méthode <code>__call__</code> du <i>tokenizer</i> est une méthode très puissante qui peut traiter à peu près tout. C'est également la méthode utilisée pour récupérer les prédictions d'un modèle.",
correct: true
},
{
text: "<code>pad</code>",
explain: "Le <i>padding</i> est très utile mais ce n'est qu'une partie de l'API <i>tokenizer</i>."
},
{
text: "<code>tokenize</code>",
explain: "La méthode <code>tokenize</code> est est sans doute l'une des méthodes les plus utiles, mais elle ne constitue pas le cœur de l'API <i>tokenizer</i>."
}
]}
/>
### 9. Que contient la variable `result` dans cet exemple de code ?
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
result = tokenizer.tokenize("Hello!")
```
<Question
choices={[
{
text: "Une liste de chaînes de caractères, chaque chaîne étant un <i>token</i>.",
explain: "Convertissez cela en identifiants, et donnez-les à un modèle !",
correct: true
},
{
text: "Une liste d'identifiants",
explain: "C'est à cela que la méthode <code>__call__</code> ou la méthode <code>convert_tokens_to_ids</code> sert !"
},
{
text: "Une chaîne contenant tous les <i>tokens</i>",
explain: "Ce serait sous-optimal car le but est de diviser la chaîne de caractères en plusieurs éléments."
}
]}
/>
{#if fw === 'pt'}
### 10. Y a-t-il un problème avec le code suivant ?
```py
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = AutoModel.from_pretrained("gpt2")
encoded = tokenizer("Hey!", return_tensors="pt")
result = model(**encoded)
```
<Question
choices={[
{
text: "Non, ça semble correct.",
explain: "Malheureusement, coupler un modèle avec un <i>tokenizer</i> qui a été entraîné avec un <i>checkpoint</i> différent est rarement une bonne idée. Le modèle n'a pas été entraîné pour donner du sens à la sortie de ce <i>tokenizer</i> donc la sortie du modèle (s'il peut même fonctionner !) n'aura aucun sens."
},
{
text: " Le <i>tokenizer</i> et le modèle doivent toujours provenir du même <i>checkpoint</i>.",
explain: "",
correct: true
},
{
text: " C'est une bonne pratique de faire du <i>padding</i> et de troncage avec le <i>tokenizer</i> car chaque entrée est un batch.",
explain: "Il est vrai que chaque entrée de modèle doit être un batch. Cependant, tronquer ou compléter cette séquence n'aurait pas nécessairement de sens puisqu'il n'y en a qu'une seule. Il s'agit là de techniques permettant de mettre en batch une liste de phrases."
}
]}
/>
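À titre indicatif, une version cohérente où le tokenizer et le modèle proviennent du même checkpoint ressemblerait à ceci :

```py
from transformers import AutoTokenizer, AutoModel

checkpoint = "bert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint)

encoded = tokenizer("Hey!", return_tensors="pt")
result = model(**encoded)
```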
{:else}
### 10. Y a-t-il un problème avec le code suivant ?
```py
from transformers import AutoTokenizer, TFAutoModel
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = TFAutoModel.from_pretrained("gpt2")
encoded = tokenizer("Hey!", return_tensors="pt")
result = model(**encoded)
```
<Question
choices={[
{
text: "Non, ça semble correct.",
explain: "Malheureusement, coupler un modèle avec un <i>tokenizer</i> qui a été entraîné avec un <i>checkpoint</i> différent est rarement une bonne idée. Le modèle n'a pas été entraîné pour donner du sens à la sortie de ce <i>tokenizer</i> donc la sortie du modèle (s'il peut même fonctionner !) n'aura aucun sens."
},
{
text: " Le <i>tokenizer</i> et le modèle doivent toujours provenir du même <i>checkpoint</i>.",
explain: "",
correct: true
},
{
text: " C'est une bonne pratique de faire du <i>padding</i> et de troncage avec le <i>tokenizer</i> car chaque entrée est un batch.",
explain: "Il est vrai que chaque entrée de modèle doit être un batch. Cependant, tronquer ou compléter cette séquence n'aurait pas nécessairement de sens puisqu'il n'y en a qu'une seule. Il s'agit là de techniques permettant de mettre en batch une liste de phrases."
}
]}
/>
{/if}
| course/chapters/fr/chapter2/8.mdx/0 | {
"file_path": "course/chapters/fr/chapter2/8.mdx",
"repo_id": "course",
"token_count": 5004
} | 125 |
# Il est temps de trancher et de découper
<CourseFloatingBanner chapter={5}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "English", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter5/section3.ipynb"},
{label: "Français", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/fr/chapter5/section3.ipynb"},
{label: "English", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter5/section3.ipynb"},
{label: "Français", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/fr/chapter5/section3.ipynb"},
]} />
La plupart du temps, les données avec lesquelles vous travaillez ne sont pas parfaitement préparées pour l'entraînement de modèles. Dans cette section, nous allons explorer les différentes fonctionnalités fournies par 🤗 *Datasets* pour nettoyer vos jeux de données.
<Youtube id="tqfSFcPMgOI"/>
## Trancher et découper nos données
Semblable à Pandas, 🤗 *Datasets* fournit plusieurs fonctions pour manipuler le contenu des objets `Dataset` et `DatasetDict`. Nous avons déjà rencontré la méthode `Dataset.map()` dans le [chapitre 3](/course/fr/chapter3) et dans cette section nous allons explorer certaines des autres fonctions à notre disposition.
Pour cet exemple, nous utiliserons le [*Drug Review Dataset*](https://archive.ics.uci.edu/ml/datasets/Drug+Review+Dataset+%28Drugs.com%29) qui est hébergé sur [*UC Irvine Machine Learning Repository*](https://archive.ics.uci.edu/ml/index.php) et contenant des avis de patients sur divers médicaments ainsi que la condition traitée et une note de 10 étoiles sur la satisfaction du patient.
Nous devons d'abord télécharger et extraire les données, ce qui peut être fait avec les commandes `wget` et `unzip` :
```py
!wget "https://archive.ics.uci.edu/ml/machine-learning-databases/00462/drugsCom_raw.zip"
!unzip drugsCom_raw.zip
```
Étant donné que TSV n'est qu'une variante de CSV qui utilise des tabulations au lieu de virgules comme séparateurs, nous pouvons charger ces fichiers en utilisant le script de chargement `csv` et en spécifiant l'argument `delimiter` dans la fonction `load_dataset()` comme suit :
```py
from datasets import load_dataset
data_files = {"train": "drugsComTrain_raw.tsv", "test": "drugsComTest_raw.tsv"}
# \t est le caractère de tabulation en Python
drug_dataset = load_dataset("csv", data_files=data_files, delimiter="\t")
```
Une bonne pratique lors de toute sorte d'analyse de données consiste à prélever un petit échantillon aléatoire pour avoir une idée rapide du type de données avec lesquelles vous travaillez. Dans 🤗 *Datasets*, nous pouvons créer un échantillon aléatoire en enchaînant les fonctions `Dataset.shuffle()` et `Dataset.select()` :
```py
drug_sample = drug_dataset["train"].shuffle(seed=42).select(range(1000))
# Un coup d'œil sur les premiers exemples
drug_sample[:3]
```
```python out
{'Unnamed: 0': [87571, 178045, 80482],
'drugName': ['Naproxen', 'Duloxetine', 'Mobic'],
'condition': ['Gout, Acute', 'ibromyalgia', 'Inflammatory Conditions'],
#['Goutte aiguë', 'ibromyalgie', 'Affections inflammatoires']
'review': ['"like the previous person mention, I'm a strong believer of aleve, it works faster for my gout than the prescription meds I take. No more going to the doctor for refills.....Aleve works!"',
# comme la personne précédente l'a mentionné, je suis un fervent partisan de l'aleve, il fonctionne plus rapidement pour ma goutte que les médicaments sur ordonnance que je prends. Je n'ai plus besoin d'aller chez le médecin pour des renouvellements.....Aleve fonctionne !"
'"I have taken Cymbalta for about a year and a half for fibromyalgia pain. It is great\r\nas a pain reducer and an anti-depressant, however, the side effects outweighed \r\nany benefit I got from it. I had trouble with restlessness, being tired constantly,\r\ndizziness, dry mouth, numbness and tingling in my feet, and horrible sweating. I am\r\nbeing weaned off of it now. Went from 60 mg to 30mg and now to 15 mg. I will be\r\noff completely in about a week. The fibro pain is coming back, but I would rather deal with it than the side effects."',
# J'ai pris du Cymbalta pendant environ un an et demi pour des douleurs de la fibromyalgie. C'est un excellent analgésique et un antidépresseur, mais les effets secondaires l'ont emporté sur tous les avantages que j'en ai tirés. J'ai eu des problèmes d'agitation, de fatigue constante, de vertiges, de bouche sèche, d'engourdissement, de picotements dans les pieds, et de transpiration horrible. Je suis en train de m'en sevrer maintenant. Je suis passée de 60 mg à 30 mg et maintenant à 15 mg. Je l'arrêterai complètement dans environ une semaine. La douleur de la fibrose revient, mais je préfère la supporter plutôt que les effets secondaires.
'"I have been taking Mobic for over a year with no side effects other than an elevated blood pressure. I had severe knee and ankle pain which completely went away after taking Mobic. I attempted to stop the medication however pain returned after a few days."'],
# J'ai pris Mobic pendant plus d'un an sans effets secondaires autres qu'une pression sanguine élevée. J'avais de fortes douleurs au genou et à la cheville qui ont complètement disparu après avoir pris Mobic. J'ai essayé d'arrêter le médicament mais la douleur est revenue après quelques jours."
'rating': [9.0, 3.0, 10.0],
'date': ['September 2, 2015', 'November 7, 2011', 'June 5, 2013'],
#['2 septembre 2015', '7 novembre 2011', '5 juin 2013']
'usefulCount': [36, 13, 128]}
```
Notez que nous avons fixé la graine dans `Dataset.shuffle()` à des fins de reproductibilité. `Dataset.select()` attend un itérable d'indices, nous avons donc passé `range(1000)` pour récupérer les 1 000 premiers exemples du jeu de données mélangé. À partir de cet échantillon, nous pouvons déjà voir quelques bizarreries dans notre jeu de données :
* la colonne `Unnamed: 0` ressemble étrangement à un identifiant anonyme pour chaque patient,
* la colonne `condition` comprend un mélange d'étiquettes en majuscules et en minuscules,
* les avis sont de longueur variable et contiennent un mélange de séparateurs de lignes Python (`\r\n`) ainsi que des codes de caractères HTML comme `&#039;`.
Voyons comment nous pouvons utiliser 🤗 *Datasets* pour traiter chacun de ces problèmes. Pour tester l'hypothèse de l'ID patient pour la colonne `Unnamed : 0`, nous pouvons utiliser la fonction `Dataset.unique()` pour vérifier que le nombre d'ID correspond au nombre de lignes dans chaque division :
```py
for split in drug_dataset.keys():
assert len(drug_dataset[split]) == len(drug_dataset[split].unique("Unnamed: 0"))
```
Cela semble confirmer notre hypothèse, alors nettoyons un peu en renommant la colonne `Unnamed: 0` en quelque chose d'un peu plus interprétable. Nous pouvons utiliser la fonction `DatasetDict.rename_column()` pour renommer la colonne sur les deux divisions en une seule fois :
```py
drug_dataset = drug_dataset.rename_column(
original_column_name="Unnamed: 0", new_column_name="patient_id"
)
drug_dataset
```
```python out
DatasetDict({
train: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount'],
num_rows: 161297
})
test: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount'],
num_rows: 53766
})
})
```
<Tip>
✏️ **Essayez !** Utilisez la fonction ` Dataset.unique()` pour trouver le nombre de médicaments et de conditions uniques dans les échantillons d'entraînement et de test.
</Tip>
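À titre indicatif, une solution possible pour cet exercice pourrait ressembler à ceci :

```py
for split in drug_dataset.keys():
    n_drugs = len(drug_dataset[split].unique("drugName"))
    n_conditions = len(drug_dataset[split].unique("condition"))
    print(f"{split} : {n_drugs} médicaments uniques, {n_conditions} conditions uniques")
```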
Ensuite, normalisons toutes les étiquettes `condition` en utilisant `Dataset.map()`. Comme nous l'avons fait avec la tokenisation dans le [chapitre 3](/course/fr/chapter3), nous pouvons définir une fonction simple qui peut être appliquée sur toutes les lignes de chaque division dans `drug_dataset` :
```py
def lowercase_condition(example):
return {"condition": example["condition"].lower()}
drug_dataset.map(lowercase_condition)
```
```python out
AttributeError: 'NoneType' object has no attribute 'lower'
```
Oh non, nous rencontrons un problème avec notre fonction ! À partir de l'erreur, nous pouvons déduire que certaines des entrées de la colonne `condition` sont `None` ne pouvant donc pas être mises en minuscules car ce ne sont pas des chaînes. Supprimons ces lignes en utilisant `Dataset.filter()`, qui fonctionne de manière similaire à `Dataset.map()` et attend une fonction qui reçoit un seul exemple issu du jeu de données. Au lieu d'écrire une fonction explicite comme :
```py
def filter_nones(x):
return x["condition"] is not None
```
puis exécuter `drug_dataset.filter(filter_nones)`, nous pouvons le faire en une seule ligne en utilisant une _fonction lambda_. En Python, les fonctions lambda sont de petites fonctions que vous pouvez définir sans les nommer explicitement. Ils prennent la forme générale :
```
lambda <arguments> : <expression>
```
où `lambda` est l'un des [mots clés](https://docs.python.org/3/reference/lexical_analysis.html#keywords) spéciaux de Python, `<arguments>` est une liste/ensemble de valeurs séparées par des virgules qui définissent les entrées de la fonction et `<expression>` représente les opérations que vous souhaitez exécuter. Par exemple, nous pouvons définir une simple fonction lambda qui met au carré un nombre comme suit :
```
lambda x : x * x
```
Pour appliquer cette fonction à une entrée, nous devons l'envelopper ainsi que l'entrée entre parenthèses :
```py
(lambda x: x * x)(3)
```
```python out
9
```
De même, nous pouvons définir des fonctions lambda avec plusieurs arguments en les séparant par des virgules. Par exemple, nous pouvons calculer l'aire d'un triangle comme suit :
```py
(lambda base, height: 0.5 * base * height)(4, 8)
```
```python out
16.0
```
Les fonctions lambda sont pratiques lorsque vous souhaitez définir de petites fonctions à usage unique (pour plus d'informations à leur sujet, nous vous recommandons de lire l'excellent [tutoriel Real Python](https://realpython.com/python-lambda/) d'André Burgaud) . Dans le contexte de la bibliothèque 🤗 *Datasets*, nous pouvons utiliser des fonctions lambda pour définir des opérations simples de « mappage » et de filtrage. Utilisons cette astuce pour éliminer les entrées `None` dans notre jeu de données :
```py
drug_dataset = drug_dataset.filter(lambda x: x["condition"] is not None)
```
Avec les entrées `None` supprimées, nous pouvons normaliser notre colonne `condition` :
```py
drug_dataset = drug_dataset.map(lowercase_condition)
# Vérification que la mise en minuscule a fonctionné
drug_dataset["train"]["condition"][:3]
```
```python out
['left ventricular dysfunction', 'adhd', 'birth control']
```
Ça marche ! Maintenant que nous avons nettoyé les étiquettes, examinons le nettoyage des avis eux-mêmes.
## Création de nouvelles colonnes
Chaque fois que vous avez affaire à des avis de clients, une bonne pratique consiste à vérifier le nombre de mots dans chaque avis. Une critique peut être un simple mot comme « Génial ! » ou un essai complet avec des milliers de mots. Selon le cas d'usage, vous devrez gérer ces extrêmes différemment. Pour calculer le nombre de mots dans chaque révision, nous utiliserons une heuristique approximative basée sur la division de chaque texte par des espaces.
Définissons une fonction simple qui compte le nombre de mots dans chaque avis :
```py
def compute_review_length(example):
return {"review_length": len(example["review"].split())}
```
Contrairement à notre fonction `lowercase_condition()`, `compute_review_length()` renvoie un dictionnaire dont la clé ne correspond pas à l'un des noms de colonne du jeu de données. Dans ce cas, lorsque `compute_review_length()` est passé à `Dataset.map()`, il est appliqué à toutes les lignes du jeu de données pour créer une nouvelle colonne `review_length` :
```py
drug_dataset = drug_dataset.map(compute_review_length)
# Inspecter le premier exemple d'entraînement
drug_dataset["train"][0]
```
```python out
{'patient_id': 206461,
'drugName': 'Valsartan',
'condition': 'left ventricular dysfunction', # dysfonctionnement du ventricule gauche
'review': '"It has no side effect, I take it in combination of Bystolic 5 Mg and Fish Oil"',
# Il n'a aucun effet secondaire, je le prends en combinaison avec Bystolic 5 mg et de l'huile de poisson.
'rating': 9.0,
'date': 'May 20, 2012', # 20 mai 2012
'usefulCount': 27,
'review_length': 17}
```
Comme prévu, nous pouvons voir qu'une colonne `review_length` a été ajoutée à notre jeu d'entraînement. Nous pouvons trier cette nouvelle colonne avec `Dataset.sort()` pour voir à quoi ressemblent les valeurs extrêmes :
```py
drug_dataset["train"].sort("review_length")[:3]
```
```python out
{'patient_id': [103488, 23627, 20558],
'drugName': ['Loestrin 21 1 / 20', 'Chlorzoxazone', 'Nucynta'],
'condition': ['birth control', 'muscle spasm', 'pain'],
# contraception, spasme musculaire, douleur.
'review': ['"Excellent."', '"useless"', '"ok"'], # Excellent, inutile, ok
'rating': [10.0, 1.0, 6.0],
'date': ['November 4, 2008', 'March 24, 2017', 'August 20, 2016'],
# 4 novembre 2008, 24 mars 2017, 20 août 2016
'usefulCount': [5, 2, 10],
'review_length': [1, 1, 1]}
```
Comme nous le soupçonnions, certaines critiques ne contiennent qu'un seul mot, ce qui, bien que cela puisse convenir à l'analyse des sentiments, n’est pas informatif si nous voulons prédire la condition.
<Tip>
🙋 Une autre façon d'ajouter de nouvelles colonnes à un jeu de données consiste à utiliser la fonction `Dataset.add_column()`. Cela vous permet de donner la colonne sous forme de liste Python ou de tableau NumPy et peut être utile dans les situations où `Dataset.map()` n'est pas bien adapté à votre analyse.
</Tip>
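Par exemple, un usage possible (purement illustratif, avec un nom de colonne hypothétique) de `Dataset.add_column()` serait :

```py
# Liste Python de même longueur que le jeu d'entraînement
n_words = [len(review.split()) for review in drug_dataset["train"]["review"]]
# Renvoie un nouveau Dataset avec la colonne supplémentaire
dataset_avec_colonne = drug_dataset["train"].add_column("n_words", n_words)
```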
Utilisons la fonction `Dataset.filter()` pour supprimer les avis contenant moins de 30 mots. De la même manière que nous l'avons fait avec la colonne `condition`, nous pouvons filtrer les avis très courts en exigeant que les avis aient une longueur supérieure à ce seuil :
```py
drug_dataset = drug_dataset.filter(lambda x: x["review_length"] > 30)
print(drug_dataset.num_rows)
```
```python out
{'train': 138514, 'test': 46108}
```
Comme vous pouvez le constater, cela a supprimé environ 15 % des avis de nos jeux d'entraînement et de test d'origine.
<Tip>
✏️ **Essayez !** Utilisez la fonction `Dataset.sort()` pour inspecter les avis avec le plus grand nombre de mots. Consultez la [documentation](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.sort) pour voir quel argument vous devez utiliser pour trier les avis par longueur dans l'ordre décroissant.
</Tip>
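Une solution possible (à titre indicatif) :

```py
# Trie par longueur décroissante et affiche les trois avis les plus longs
drug_dataset["train"].sort("review_length", reverse=True)[:3]
```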
La dernière chose à laquelle nous devons faire face est la présence de caractères HTML dans nos avis. Nous pouvons utiliser le module `html` de Python pour supprimer ces caractères, comme ceci :
```py
import html
text = "I'm a transformer called BERT"
html.unescape(text)
```
```python out
"I'm a transformer called BERT"
```
Nous utilisons `Dataset.map()` pour démasquer tous les caractères HTML de notre corpus :
```python
drug_dataset = drug_dataset.map(lambda x: {"review": html.unescape(x["review"])})
```
Comme vous pouvez le voir, la méthode `Dataset.map()` est très utile pour le traitement des données. Et nous n'avons même pas effleuré la surface de tout ce qu'elle peut faire !
## Les superpouvoirs de la méthode `map()`
La méthode `Dataset.map()` prend un argument `batched` qui, s'il est défini sur `True`, l'amène à envoyer un batch d'exemples à la fonction *map* en une seule fois (la taille du batch est configurable mais est fixée par défaut à 1 000). Par exemple, la fonction `map()` précédente qui supprime tout le code HTML prend un peu de temps à s'exécuter (vous pouvez lire le temps pris dans les barres de progression). On peut accélérer cela en traitant plusieurs éléments en même temps à l'aide d'une compréhension de liste.
Lorsque vous spécifiez `batched=True`, la fonction reçoit un dictionnaire avec les champs du jeu de données mais chaque valeur est maintenant une _liste de valeurs_ et non plus une seule valeur. La valeur retournée par `Dataset.map()` devrait être la même : un dictionnaire avec les champs que nous voulons mettre à jour ou ajouter à notre jeu de données, et une liste de valeurs. Par exemple, voici une autre façon de supprimer tous les caractères HTML, mais en utilisant `batched=True` :
```python
new_drug_dataset = drug_dataset.map(
lambda x: {"review": [html.unescape(o) for o in x["review"]]}, batched=True
)
```
Si vous exécutez ce code dans un *notebook*, vous verrez que cette commande s'exécute beaucoup plus rapidement que la précédente. Et ce n'est pas parce que les caractères HTML de nos critiques ont déjà été convertis : si vous ré-exécutez l'instruction de la section précédente (sans `batched=True`), cela prendra le même temps qu'avant. En effet, les compréhensions de liste sont généralement plus rapides que l'exécution du même code dans une boucle `for` et nous gagnons également en performances en accédant à de nombreux éléments en même temps au lieu d'un par un.
L'utilisation de `Dataset.map()` avec `batched=True` est essentielle pour les *tokenizers rapides* que nous rencontrerons dans le [chapitre 6](/course/fr/chapter6) et qui peuvent rapidement tokeniser de grandes listes de textes. Par exemple, pour tokeniser toutes les critiques de médicaments avec un *tokenizer* rapide nous pouvons utiliser une fonction comme celle-ci :
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
def tokenize_function(examples):
return tokenizer(examples["review"], truncation=True)
```
Comme vous l'avez vu dans le [chapitre 3](/course/fr/chapter3), nous pouvons passer un ou plusieurs exemples au *tokenizer*. Nous pouvons donc utiliser cette fonction avec ou sans `batched=True`. Profitons-en pour comparer les performances des différentes options. Dans un *notebook*, vous pouvez chronométrer une instruction d'une ligne en ajoutant `%time` avant la ligne de code que vous souhaitez mesurer :
```python no-format
%time tokenized_dataset = drug_dataset.map(tokenize_function, batched=True)
```
Vous pouvez également chronométrer une cellule entière en mettant `%%time` au début de la cellule. Sur le matériel sur lequel nous avons exécuté cela, cela affichait 10,8 s pour cette instruction (c'est le nombre écrit après "Wall time").
<Tip>
✏️ **Essayez !** Exécutez la même instruction avec et sans `batched=True`, puis essayez-le avec un *tokenizer* lent (ajoutez `use_fast=False` dans la méthode `AutoTokenizer.from_pretrained()`) afin que vous puissiez voir quels temps vous obtenez sur votre matériel.
</Tip>
Voici les résultats que nous avons obtenus avec et sans *batching*, avec un *tokenizer* rapide et un lent :
Options | *Tokenizer* rapide | *Tokenizer* lent
:--------------:|:----------------:|:-----------------:
`batched=True` | 10.8s | 4min41s
`batched=False` | 59.2s | 5min3s
Cela signifie que l'utilisation d'un *tokenizer* rapide avec l'option `batched=True` est 30 fois plus rapide que son homologue lent sans batch. C'est vraiment incroyable ! C'est la raison principale pour laquelle les *tokenizers* rapides sont la valeur par défaut lors de l'utilisation de `AutoTokenizer` (et pourquoi ils sont appelés « rapides »). Ils sont capables d'atteindre une telle vitesse car en coulisses le code de tokenisation est exécuté en Rust qui est un langage facilitant la parallélisation de l'exécution du code.
La parallélisation est également la raison du gain de vitesse de près de 6 fois obtenue par le *tokenizer* rapide avec batch. Vous ne pouvez pas paralléliser une seule opération de tokenisation, mais lorsque vous souhaitez tokeniser de nombreux textes en même temps, vous pouvez simplement répartir l'exécution sur plusieurs processus. Chacun responsable de ses propres textes.
`Dataset.map()` possède aussi ses propres capacités de parallélisation. Comme elles ne sont pas soutenues par Rust, un *tokenizer* lent ne peut pas rattraper un rapide mais cela peut toujours être utile (surtout si vous utilisez un *tokenizer* qui n'a pas de version rapide). Pour activer le multitraitement, utilisez l'argument `num_proc` et spécifiez le nombre de processus à utiliser dans votre appel à `Dataset.map()` :
```py
slow_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False)
def slow_tokenize_function(examples):
return slow_tokenizer(examples["review"], truncation=True)
tokenized_dataset = drug_dataset.map(slow_tokenize_function, batched=True, num_proc=8)
```
Vous pouvez faire des tests pour déterminer le nombre optimal de processus à utiliser. Dans notre cas 8 semble produire le meilleur gain de vitesse. Voici les chiffres que nous avons obtenus avec et sans multitraitement :
Options | *Tokenizer* rapide | *Tokenizer* lent
:----------------------------:|:----------------:|:---------------:
`batched=True` | 10.8s | 4min41s
`batched=False` | 59.2s | 5min3s
`batched=True`, `num_proc=8` | 6.52s | 41.3s
`batched=False`, `num_proc=8` | 9.49s | 45.2s
Ce sont des résultats beaucoup plus raisonnables pour le *tokenizer* lent mais les performances du *tokenizer* rapide ont également été considérablement améliorées. Notez, cependant, que ce ne sera pas toujours le cas : pour des valeurs de `num_proc` autres que 8, nos tests ont montré qu'il était plus rapide d'utiliser `batched=True` sans cette option. En général, nous ne recommandons pas d'utiliser le multitraitement pour les *tokenizers* rapides avec `batched=True`.
<Tip>
Utiliser `num_proc` pour accélérer votre traitement est généralement une bonne idée tant que la fonction que vous utilisez n'effectue pas déjà une sorte de multitraitement.
</Tip>
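À titre d'exemple indicatif (la fonction et la valeur `num_proc=4` sont arbitraires), le multitraitement s'applique aussi à une fonction Python classique, ici le recalcul de la longueur des critiques :
```py
# Exemple illustratif : fonction Python simple répartie sur 4 processus
def compute_review_length(example):
    return {"review_length": len(example["review"].split())}
drug_dataset = drug_dataset.map(compute_review_length, num_proc=4)
```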
Toutes ces fonctionnalités condensées en une seule méthode sont déjà assez étonnantes, mais il y a plus ! Avec `Dataset.map()` et `batched=True` vous pouvez modifier le nombre d'éléments dans votre jeu de données. Ceci est très utile dans de nombreuses situations où vous souhaitez créer plusieurs fonctionnalités d'entraînement à partir d'un exemple. Nous devrons le faire dans le cadre du prétraitement de plusieurs des tâches de traitement du langage naturel que nous entreprendrons dans le [chapitre 7](/course/fr/chapter7).
<Tip>
💡 En apprentissage automatique, un _exemple_ est généralement défini comme l'ensemble de _features_ que nous donnons au modèle. Dans certains contextes, ces caractéristiques seront l'ensemble des colonnes d'un `Dataset`, mais dans d'autres (comme ici et pour la réponse aux questions), plusieurs caractéristiques peuvent être extraites d'un seul exemple et appartenir à une seule colonne.
</Tip>
Voyons comment cela fonctionne ! Ici, nous allons tokeniser nos exemples et les tronquer à une longueur maximale de 128 mais nous demanderons au *tokenizer* de renvoyer *tous* les morceaux des textes au lieu du premier. Cela peut être fait avec `return_overflowing_tokens=True` :
```py
def tokenize_and_split(examples):
return tokenizer(
examples["review"],
truncation=True,
max_length=128,
return_overflowing_tokens=True,
)
```
Testons cela sur un exemple avant d'utiliser `Dataset.map()` sur le jeu de données :
```py
result = tokenize_and_split(drug_dataset["train"][0])
[len(inp) for inp in result["input_ids"]]
```
```python out
[128, 49]
```
Notre premier exemple du jeu d'entraînement est devenu deux caractéristiques car il a été tokenisé en un nombre de *tokens* supérieur au maximum que nous avons spécifié : le premier morceau est de longueur 128 et le second de longueur 49. Faisons maintenant cela pour tous les éléments du jeu de données !
```py
tokenized_dataset = drug_dataset.map(tokenize_and_split, batched=True)
```
```python out
ArrowInvalid: Column 1 named condition expected length 1463 but got length 1000
```
Oh non ! Cela n'a pas fonctionné ! Pourquoi ? L'examen du message d'erreur nous donne un indice : il y a une incompatibilité dans les longueurs de l'une des colonnes. L'une étant de longueur 1 463 et l'autre de longueur 1 000. Si vous avez consulté la [documentation](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.map) de `Dataset.map()`, vous vous souvenez peut-être qu'il s'agit du nombre d'échantillons passés à la fonction que nous mappons. Ici, ces 1 000 exemples ont donné 1 463 nouvelles caractéristiques, entraînant une erreur de forme.
Le problème est que nous essayons de mélanger deux jeux de données différents de tailles différentes : les colonnes `drug_dataset` auront un certain nombre d'exemples (les 1 000 dans notre erreur), mais le `tokenized_dataset` que nous construisons en aura plus (le 1 463 dans le message d'erreur). Cela ne fonctionne pas pour un `Dataset`, nous devons donc soit supprimer les colonnes de l'ancien jeu de données, soit leur donner la même taille que dans le nouveau jeu de données. Nous pouvons faire la première option avec l'argument `remove_columns` :
```py
tokenized_dataset = drug_dataset.map(
tokenize_and_split, batched=True, remove_columns=drug_dataset["train"].column_names
)
```
Maintenant, cela fonctionne sans erreur. Nous pouvons vérifier que notre nouveau jeu de données contient beaucoup plus d'éléments que le jeu de données d'origine en comparant les longueurs :
```py
len(tokenized_dataset["train"]), len(drug_dataset["train"])
```
```python out
(206772, 138514)
```
Nous avons mentionné que nous pouvions également résoudre le problème de longueur non concordante en donnant aux anciennes colonnes la même taille que les nouvelles. Pour ce faire, nous avons besoin du champ `overflow_to_sample_mapping` que le *tokenizer* renvoie lorsque nous définissons `return_overflowing_tokens=True`. Il nous donne une correspondance entre un nouvel index de caractéristique et l'index de l'échantillon dont il est issu. Grâce à cela, nous pouvons associer chaque clé présente dans notre jeu de données d'origine à une liste de valeurs de la bonne taille en répétant les valeurs de chaque exemple autant de fois qu'il génère de nouvelles caractéristiques :
```py
def tokenize_and_split(examples):
result = tokenizer(
examples["review"],
truncation=True,
max_length=128,
return_overflowing_tokens=True,
)
# Extraire la correspondance entre les nouveaux et les anciens indices
sample_map = result.pop("overflow_to_sample_mapping")
for key, values in examples.items():
result[key] = [values[i] for i in sample_map]
return result
```
Nous pouvons voir que cela fonctionne avec `Dataset.map()` sans que nous ayons besoin de supprimer les anciennes colonnes :
```py
tokenized_dataset = drug_dataset.map(tokenize_and_split, batched=True)
tokenized_dataset
```
```python out
DatasetDict({
train: Dataset({
features: ['attention_mask', 'condition', 'date', 'drugName', 'input_ids', 'patient_id', 'rating', 'review', 'review_length', 'token_type_ids', 'usefulCount'],
num_rows: 206772
})
test: Dataset({
features: ['attention_mask', 'condition', 'date', 'drugName', 'input_ids', 'patient_id', 'rating', 'review', 'review_length', 'token_type_ids', 'usefulCount'],
num_rows: 68876
})
})
```
Nous obtenons le même nombre de caractéristiques d'entraînement qu'auparavant, mais ici nous avons conservé tous les anciens champs. Si vous en avez besoin pour un post-traitement après l'application de votre modèle, vous pouvez utiliser cette approche.
Vous avez maintenant vu comment 🤗 *Datasets* peut être utilisé pour prétraiter un jeu de données de différentes manières. Bien que les fonctions de traitement de 🤗 *Datasets* couvrent la plupart de vos besoins, il peut arriver que vous deviez passer à Pandas pour accéder à des fonctionnalités plus puissantes, telles que `DataFrame.groupby()` ou des API de haut niveau pour la visualisation. Heureusement, 🤗 *Datasets* est conçu pour être interopérable avec des bibliothèques telles que Pandas, NumPy, PyTorch, TensorFlow et JAX. Voyons comment cela fonctionne.
## De `Dataset` à `DataFrame` et vice versa
<Youtube id="tfcY1067A5Q"/>
Pour permettre la conversion entre diverses bibliothèques tierces, 🤗 *Datasets* fournit une fonction `Dataset.set_format()`. Cette fonction ne modifie que le _format de sortie_ du jeu de données. Vous pouvez donc facilement passer à un autre format sans affecter le _format de données_ sous-jacent, qui est Apache Arrow. Le formatage se fait sur place. Pour démontrer, convertissons notre jeu de données vers Pandas :
```py
drug_dataset.set_format("pandas")
```
Maintenant, lorsque nous accédons aux éléments du jeu de données, nous obtenons un `pandas.DataFrame` au lieu d'un dictionnaire :
```py
drug_dataset["train"][:3]
```
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>patient_id</th>
<th>drugName</th>
<th>condition</th>
<th>review</th>
<th>rating</th>
<th>date</th>
<th>usefulCount</th>
<th>review_length</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>95260</td>
<td>Guanfacine</td>
<td>adhd</td>
<td>"My son is halfway through his fourth week of Intuniv..."</td>
<td>8.0</td>
<td>April 27, 2010</td>
<td>192</td>
<td>141</td>
</tr>
<tr>
<th>1</th>
<td>92703</td>
<td>Lybrel</td>
<td>birth control</td>
<td>"I used to take another oral contraceptive, which had 21 pill cycle, and was very happy- very light periods, max 5 days, no other side effects..."</td>
<td>5.0</td>
<td>December 14, 2009</td>
<td>17</td>
<td>134</td>
</tr>
<tr>
<th>2</th>
<td>138000</td>
<td>Ortho Evra</td>
<td>birth control</td>
<td>"This is my first time using any form of birth control..."</td>
<td>8.0</td>
<td>November 3, 2015</td>
<td>10</td>
<td>89</td>
</tr>
</tbody>
</table>
Créons un `pandas.DataFrame` pour l'ensemble d'entraînement en sélectionnant tous les éléments de `drug_dataset["train"]` :
```py
train_df = drug_dataset["train"][:]
```
<Tip>
🚨 Sous le capot, `Dataset.set_format()` change le format de retour pour la méthode `__getitem__()`. Cela signifie que lorsque nous voulons créer un nouvel objet comme `train_df` à partir d'un `Dataset` au format `"pandas"`, nous devons découper tout le jeu de données pour obtenir un `pandas.DataFrame`. Vous pouvez vérifier par vous-même que le type de `drug_dataset["train"]` est `Dataset`, quel que soit le format de sortie.
</Tip>
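Pour vérifier le point mentionné ci-dessus, on peut par exemple inspecter le type de l'objet (simple vérification indicative) :
```py
# Le format de sortie est "pandas", mais l'objet reste bien un Dataset
type(drug_dataset["train"])
```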
De là, nous pouvons utiliser toutes les fonctionnalités Pandas que nous voulons. Par exemple, nous pouvons faire un chaînage sophistiqué pour calculer la distribution de classe parmi les entrées `condition` :
```py
frequencies = (
train_df["condition"]
.value_counts()
.to_frame()
.reset_index()
.rename(columns={"index": "condition", "condition": "frequency"})
)
frequencies.head()
```
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>condition</th>
<th>frequency</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>birth control</td>
<td>27655</td>
</tr>
<tr>
<th>1</th>
<td>depression</td>
<td>8023</td>
</tr>
<tr>
<th>2</th>
<td>acne</td>
<td>5209</td>
</tr>
<tr>
<th>3</th>
<td>anxiety</td>
<td>4991</td>
</tr>
<tr>
<th>4</th>
<td>pain</td>
<td>4744</td>
</tr>
</tbody>
</table>
Et une fois que nous avons terminé notre analyse Pandas, nous pouvons toujours créer un nouvel objet `Dataset` en utilisant la fonction `Dataset.from_pandas()` comme suit :
```py
from datasets import Dataset
freq_dataset = Dataset.from_pandas(frequencies)
freq_dataset
```
```python out
Dataset({
features: ['condition', 'frequency'],
num_rows: 819
})
```
<Tip>
✏️ **Essayez !** Calculez la note moyenne par médicament et stockez le résultat dans un nouveau jeu de données.
</Tip>
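À titre indicatif, une approche possible (parmi d'autres) consiste à réutiliser `train_df` puis `Dataset.from_pandas()` vus plus haut :
```py
from datasets import Dataset
# Note moyenne par médicament (esquisse à adapter)
mean_ratings = train_df.groupby("drugName")["rating"].mean().reset_index()
mean_ratings_dataset = Dataset.from_pandas(mean_ratings)
```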
Ceci conclut notre visite des différentes techniques de prétraitement disponibles dans 🤗 *Datasets*. Pour compléter la section, créons un ensemble de validation pour préparer le jeu de données à l’entraînement d'un classifieur. Avant cela, nous allons réinitialiser le format de sortie de `drug_dataset` de `"pandas"` à `"arrow"` :
```python
drug_dataset.reset_format()
```
## Création d'un ensemble de validation
Bien que nous ayons un jeu de test que nous pourrions utiliser pour l'évaluation, il est recommandé de ne pas toucher au jeu de test et de créer un jeu de validation séparé pendant le développement. Une fois que vous êtes satisfait des performances de vos modèles sur l'ensemble de validation, vous pouvez effectuer une dernière vérification d'intégrité sur l'ensemble test. Ce processus permet d'atténuer le risque de surentraînement sur le jeu de test et de déployer un modèle qui échoue sur des données du monde réel.
🤗 *Datasets* fournit une fonction `Dataset.train_test_split()` basée sur la célèbre fonctionnalité de `scikit-learn`. Utilisons-la pour diviser notre ensemble d'entraînement `train` et `validation` (nous définissons l'argument `seed` pour la reproductibilité) :
```py
drug_dataset_clean = drug_dataset["train"].train_test_split(train_size=0.8, seed=42)
# Renommer la division par défaut "test" en "validation"
drug_dataset_clean["validation"] = drug_dataset_clean.pop("test")
# Ajoutez le jeu "test" à notre `DatasetDict`
drug_dataset_clean["test"] = drug_dataset["test"]
drug_dataset_clean
```
```python out
DatasetDict({
train: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount', 'review_length', 'review_clean'],
num_rows: 110811
})
validation: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount', 'review_length', 'review_clean'],
num_rows: 27703
})
test: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount', 'review_length', 'review_clean'],
num_rows: 46108
})
})
```
Génial, nous avons maintenant préparé un jeu de données prêt pour l'entraînement de certains modèles ! Dans la [section 5](/course/fr/chapter5/5), nous vous montrerons comment télécharger des jeux de données sur le *Hub*. Mais pour l'instant, terminons notre analyse en examinant quelques façons d'enregistrer des jeux de données sur votre ordinateur local.
## Enregistrer un jeu de données
<Youtube id="blF9uxYcKHo"/>
Bien que 🤗 *Datasets* mette en cache chaque jeu de données téléchargé et les opérations qui y sont effectuées, il y a des moments où vous voudrez enregistrer un jeu de données sur le disque (par exemple, au cas où le cache serait supprimé). Comme indiqué dans le tableau ci-dessous, 🤗 *Datasets* fournit trois fonctions principales pour enregistrer votre jeu de données dans différents formats :
| Format de données | Fonction |
| :---------------: | :----------------------: |
| Arrow | `Dataset.save_to_disk()` |
| CSV | `Dataset.to_csv()` |
| JSON | `Dataset.to_json()` |
Par exemple, enregistrons notre jeu de données nettoyé au format Arrow :
```py
drug_dataset_clean.save_to_disk("drug-reviews")
```
Cela créera un répertoire avec la structure suivante :
```
drug-reviews/
├── dataset_dict.json
├── test
│ ├── dataset.arrow
│ ├── dataset_info.json
│ └── state.json
├── train
│ ├── dataset.arrow
│ ├── dataset_info.json
│ ├── indices.arrow
│ └── state.json
└── validation
├── dataset.arrow
├── dataset_info.json
├── indices.arrow
└── state.json
```
où nous pouvons voir que chaque division est associée à sa propre table *dataset.arrow* et à certaines métadonnées dans *dataset_info.json* et *state.json*. Vous pouvez considérer le format Arrow comme un tableau sophistiqué de colonnes et de lignes optimisé pour la création d'applications hautes performances qui traitent et transportent de grands ensembles de données.
Une fois le jeu de données enregistré, nous pouvons le charger en utilisant la fonction `load_from_disk()` comme suit :
```py
from datasets import load_from_disk
drug_dataset_reloaded = load_from_disk("drug-reviews")
drug_dataset_reloaded
```
```python out
DatasetDict({
train: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount', 'review_length'],
num_rows: 110811
})
validation: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount', 'review_length'],
num_rows: 27703
})
test: Dataset({
features: ['patient_id', 'drugName', 'condition', 'review', 'rating', 'date', 'usefulCount', 'review_length'],
num_rows: 46108
})
})
```
Pour les formats CSV et JSON, nous devons stocker chaque fractionnement dans un fichier séparé. Pour ce faire, vous pouvez parcourir les clés et les valeurs de l'objet `DatasetDict` :
```py
for split, dataset in drug_dataset_clean.items():
dataset.to_json(f"drug-reviews-{split}.jsonl")
```
Cela enregistre chaque fractionnement au [format JSON Lines](https://jsonlines.org), où chaque ligne du jeu de données est stockée sous la forme d'une seule ligne de JSON. Voici à quoi ressemble le premier exemple :
```py
!head -n 1 drug-reviews-train.jsonl
```
```python out
{"patient_id":141780,"drugName":"Escitalopram","condition":"depression","review":"\"I seemed to experience the regular side effects of LEXAPRO, insomnia, low sex drive, sleepiness during the day. I am taking it at night because my doctor said if it made me tired to take it at night. I assumed it would and started out taking it at night. Strange dreams, some pleasant. I was diagnosed with fibromyalgia. Seems to be helping with the pain. Have had anxiety and depression in my family, and have tried quite a few other medications that haven't worked. Only have been on it for two weeks but feel more positive in my mind, want to accomplish more in my life. Hopefully the side effects will dwindle away, worth it to stick with it from hearing others responses. Great medication.\"","rating":9.0,"date":"May 29, 2011","usefulCount":10,"review_length":125}
# Il semble que je ressente les effets secondaires habituels de LEXAPRO : insomnie, baisse de la libido, somnolence pendant la journée. Je le prends le soir parce que mon médecin m'a dit de le prendre le soir s'il me fatiguait. J'ai supposé que ce serait le cas et j'ai commencé à le prendre la nuit. Rêves étranges, certains agréables. On m'a diagnostiqué une fibromyalgie. Il semble que ce médicament aide à soulager la douleur. J'ai eu de l'anxiété et de la dépression dans ma famille, et j'ai essayé plusieurs autres médicaments qui n'ont pas fonctionné. Cela ne fait que deux semaines que je prends ce médicament, mais je me sens plus positif dans mon esprit et je veux accomplir davantage dans ma vie. J'espère que les effets secondaires vont s'estomper, cela vaut la peine de s'y tenir d'après les réponses des autres. C'est un excellent médicament.
```
Nous pouvons ensuite utiliser les techniques de [section 2](/course/fr/chapter5/2) pour charger les fichiers JSON comme suit :
```py
data_files = {
"train": "drug-reviews-train.jsonl",
"validation": "drug-reviews-validation.jsonl",
"test": "drug-reviews-test.jsonl",
}
drug_dataset_reloaded = load_dataset("json", data_files=data_files)
```
Et c'est tout pour notre excursion dans la manipulation des données avec 🤗 *Datasets* ! Maintenant que nous disposons d'un ensemble de données nettoyé pour entraîner un modèle, voici quelques idées que vous pouvez essayer :
1. Utilisez les techniques du [chapitre 3](/course/fr/chapter3) pour entraîner un classifieur capable de prédire l'état du patient en fonction de l'examen du médicament.
2. Utilisez le pipeline `summarization` du [chapitre 1](/course/fr/chapter1) pour générer des résumés des critiques (voir l'esquisse ci-dessous).
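Pour la deuxième piste, une esquisse minimale (le modèle par défaut et l'exemple choisi sont arbitraires et à adapter) pourrait ressembler à ceci :
```py
from transformers import pipeline
# Esquisse : résumé d'une seule critique ; ajustez le modèle et les longueurs selon vos besoins
summarizer = pipeline("summarization")
summarizer(drug_dataset_reloaded["train"][0]["review"])
```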
Ensuite, nous verrons comment 🤗 *Datasets* peut vous permettre de travailler avec d'énormes jeux de données sans faire exploser votre ordinateur portable !
| course/chapters/fr/chapter5/3.mdx/0 | {
"file_path": "course/chapters/fr/chapter5/3.mdx",
"repo_id": "course",
"token_count": 15613
} | 126 |
# <i>Tokenizer</i>, coché !
<CourseFloatingBanner
chapter={6}
classNames="absolute z-10 right-0 top-0"
/>
Bon travail pour finir ce chapitre !
Après cette plongée en profondeur dans les *tokenizers*, vous devriez :
- être capable d'entraîner un nouveau *tokenizer* en utilisant un ancien *tokenizer* comme modèle,
- comprendre comment utiliser les *offsets* pour faire correspondre la position des *tokens* à l'étendue de texte d'origine,
- connaître les différences entre BPE, *WordPiece* et *Unigram*,
- être capable de combiner les blocs fournis par la bibliothèque 🤗 *Tokenizers* pour construire votre propre *tokenizer*,
- être capable d'utiliser ce *tokenizer* dans la bibliothèque 🤗 *Transformers*.
| course/chapters/fr/chapter6/9.mdx/0 | {
"file_path": "course/chapters/fr/chapter6/9.mdx",
"repo_id": "course",
"token_count": 262
} | 127 |
# Partie 2 terminée !
<CourseFloatingBanner
chapter={8}
classNames="absolute z-10 right-0 top-0"
/>
Félicitations, vous avez terminé la deuxième partie du cours ! Nous travaillons activement sur la troisième alors inscrivez-vous à notre [*newsletter*](https://huggingface.curated.co/) pour être sûr de ne pas manquer sa sortie.
Vous devriez maintenant être en mesure d'aborder une série de tâches de NLP et de *finetuner* ou de pré-entraîner un modèle sur celles-ci. N'oubliez pas de partager vos résultats avec la communauté sur le [*Hub*](https://huggingface.co/models).
Nous sommes impatients de voir ce que vous allez construire avec les connaissances que vous avez acquises !
| course/chapters/fr/chapter8/6.mdx/0 | {
"file_path": "course/chapters/fr/chapter8/6.mdx",
"repo_id": "course",
"token_count": 258
} | 128 |
# પરિચય
હગિંગ ફેસ પર આપનું સ્વાગત છે! આ પરિચય તમને કામનું વાતાવરણ ગોઠવવામાં માર્ગદર્શન આપશે. જો તમે હમણાં જ અભ્યાસક્રમ સાથે પ્રારંભ કરી રહ્યાં છો, તો અમે ભલામણ કરીએ છીએ કે તમે પહેલા [પ્રકરણ 1](https://github.com/huggingface/course/blob/main/course/chapter1) પર એક નજર નાખો, પછી પાછા આવો અને તમારું Environment સેટ કરો જેથી તમે ‘કોડ’ જાતે અજમાવી શકો.
આ કોર્સમાં આપણે જે લાઈબ્રેરીનો ઉપયોગ કરીશું તે Python Package તરીકે ઉપલબ્ધ છે, તેથી અહીં અમે તમને બતાવીશું કે Python Environment કેવી રીતે સેટ કરવું અને તમને જોઈતી વિશિષ્ટ લાઈબ્રેરીઓ કેવી રીતે ઇન્સ્ટોલ કરવી.
Colab Notebook અથવા Python Virtual એન્વાયર્નમેન્ટનો ઉપયોગ કરીને અમે તમારા કામનું વાતાવરણ સેટ કરવાની બે રીતે આવરી લઈશું. તમને જે સૌથી વધુ પસંદ હોય તે ઉપયોગ કરો. નવા નિશાળિયા માટે, અમે ભલામણ કરીએ છીએ કે તમે Colab નોટબુકથી શરૂઆત કરો.
નોંધ કરો કે અમે Windows System ને આવરીશું નહીં. જો તમે Windows ચલાવી રહ્યાં હોવ, તો અમે તેને અનુસરવા માટે Colab Notebook નો ઉપયોગ કરવાનો સુઝાવ આપીએ છીએ. જો તમે Linux વિતરણ અથવા MacOS નો ઉપયોગ કરી રહ્યાં છો, તો તમે અહીં વર્ણવેલ કોઈપણ અભિગમનો ઉપયોગ કરી શકો છો.
અલબત્ત મોટાભાગનો આધાર તમારા હગિંગ ફેસ એકાઉન્ટ પર છે. અમે હમણાં એક ખાતું બનાવવાની ભલામણ કરીએ છીએ: [ખાતું અહીં બનાવો](https://huggingface.co/join)
## Google Colab Notebook(ગૂગલ કોલાબ નોટબુક) ની મદદ થી
હુગિંગફેસ(huggingface) નું સૌથી આસાન સેટઅપ Google Colab નોટબુક થી કરી શકાય. તમારા વેબ બ્રાઉઝર માં colab ઓપન કરો.
જો તમે પેહલા colab થી પરિચિત ના હોવ, તો [પરિચય](https://colab.research.google.com/notebooks/intro.ipynb). થી શરૂઆત કરવી. Colab તમને advanced hardware જેમકે GPU અથવા TPU આપશે, જે નાના prototype માટે વિના મૂલ્યે વાપરી શકાય.
જો તમને એક વાર colab ફાવી જાય તો નવી નોટબુક open કરી જરૂરી પેકેજ install કરી શકાય જે setup કરવા માટે અત્યંત જરૂરી છે.:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/new_colab.png" alt="An empty colab notebook" width="80%"/>
</div>
હવે આપણે libraries install કરીશું જે આખા course માં વપરાશે. Installation માટે આપણે `pip` વાપરીશું, જે python માં પેકેજ મેનેજર છે. Notebook ના cells માં તમે system કમાંડ run કરી શકો જો તમે એને `!` થી શરૂ કરો. 🤗 Transformers library તમે આ રીતે install કરી શકો:
```
!pip install transformers
```
જો આપને ચકાસવું હોય કે પેકેજ બરાબર install થયું છે કે નહિ તો આ રીતે કરી શકાય:
```
import transformers
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/install.gif" alt="A gif showing the result of the two commands above: installation and import" width="80%"/>
</div>
આ આગળ નો command અમુક જ પેકેજ install કરશે. એ મશીનલનિંગ ના ફ્રેમવર્ક જેમકે (Tensorflow અને Pytorch) ઇન્સ્ટોલ નઈ કરે. આ course માં આપણે ઘણાં ફિચર્સ જોઈશું એટલે હું development વર્સન install કરવાની સલાહ આપીશ, કે જેમાં બધા પેકેજ અને જરૂરી લાઇબ્રેરી install એક સાથે આવશે:
```
!pip install transformers[sentencepiece]
```
આ સ્ટેપ run થતાં થોડો ટાઈમ લાગશે, પણ એનાથી આગળ ના પ્રકરણ માં સારું પડશે!
## Python Virtual Environment ની મદદ થી
જો તમને python virtual environment અનુકૂળ આવતું હોય તો પેહલું તબકું એ તમારા system માં python install છે. અમે આ [guide](https://realpython.com/installing-python/) અનુસરવાનું કહીશું.
એકવાર python install થઈ જાય એટલે તમારા system ના terminal માં python command run કરી શકવા જોઈએ. જો તમે તપાસવા માંગતા હોવ તો `python --version` run કરી શકો. આ command python નું version આપશે.
જ્યારે તમે python command run કરો, જેમકે `python --version`, ત્યારે એ તમારી કમ્પ્યુટર સિસ્ટમ ના મુખ્ય python environment માં run થશે. અમે મુખ્ય python environment માં કઈ પણ install ન કરવાનો સુજાવ કરીએ છીએ. દરેક application માટે, અલગ environment વાપરવું, એમ કરવાથી દરેક application ને પેકેજ અને dependency અલગ અલગ રહેશે. આમ કરવાથી બીજી application સાથે compatibility ની સમસ્યા નઈ આવે.
Python માં આ બધું [*virtual environments*](https://docs.python.org/3/tutorial/venv.html) થી થાય છે, આ virtual environment તમને અલગ ફોલ્ડર બનાઈ આપશે જેવા જરૂરી python version સાથે packages હશે જે તમને application માટે જરૂરી હશે. આ રીતનું virtual environment ઘણી રીતે બનાવી શકાય. આપણે python નું official tool [`venv`](https://docs.python.org/3/library/venv.html#module-venv) વાપરીશું.
સૌથી પેહલા એક ફોલ્ડર બનાવો કે જેમાં તમારી application નો code રેહશે.દાખલા તરીકે, તમે તમરી હોમ ફોલ્ડર માં *transformers-course* નામનું ફોલ્ડર બનાવો છો:
```
mkdir ~/transformers-course
cd ~/transformers-course
```
એ ફોલ્ડર માં, python ના `venv` મોડ્યુલ ની મદદ થી virtual environment બનાવો:
```
python -m venv .env
```
તમને પેહલા ના ખાલી ફોલ્ડર માં *.env* નામનું ફોલ્ડર દેખાશે:
```
ls -a
```
```out
. .. .env
```
તમે તમારા virtual environment ને activate અને deactivate કરવા માટે `activate` અને `deactivate` સ્ક્રિપ્ટ વાપરી શકો:
```
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
source .env/bin/deactivate
```
જો તમે verify કરવા માંગતા હોવ તો `which python` command run કરો. એ તમરા virtual environment ના ફોલ્ડર ને આઉટપુટ માં આપશે. આ એવું સાબિત કરે છે કે virtual environment સફળાપૂર્વક active છે.!
```
which python
```
```out
/home/<user>/transformers-course/.env/bin/python
```
### Installing dependencies
જેમ આપણે પેહલા ના colab વાળા સેકશન માં કરેલું એમ, આપણે પેકેજ ઇન્સ્ટોલ કરીશું. આપણે `pip` પેકેજ મેનેજર ની મદદ થી 🤗 `transformers` નું ડેવલપમેન્ટ વર્સન ઇન્સ્ટોલ કરીશું:
```
pip install "transformers[sentencepiece]"
```
હવે તમારું સિસ્ટમ સેટઅપ થઈ ગયું છે અને તમે આગળ વધવા માટે સક્ષમ છો! | course/chapters/gj/chapter0/1.mdx/0 | {
"file_path": "course/chapters/gj/chapter0/1.mdx",
"repo_id": "course",
"token_count": 8120
} | 129 |
<FrameworkSwitchCourse {fw} />
# परिचय
<CourseFloatingBanner
chapter={3}
classNames="absolute z-10 right-0 top-0"
/>
[अध्याय 2](/course/chapter2) में हमने जाना कि भविष्यवाणी करने के लिए टोकननाइज़र और पूर्व-प्रशिक्षित मॉडल का उपयोग कैसे किया जाता है। लेकिन क्या होगा यदि आप अपने स्वयं के डेटासेट पर एक पूर्व-प्रशिक्षित मॉडल को फाइन-ट्यून करना चाहते हैं? यही इस अध्याय का विषय है! आप सीखेंगे कि:
{#if fw === 'pt'}
* हब से एक बड़ा डेटासेट कैसे तैयार किया जाता है
* किसी मॉडल को फाइन-ट्यून करने के लिए उच्च स्तरीय `Trainer` API का उपयोग कैसे करें
* तदनुकूल प्रशिक्षण लूप का उपयोग कैसे करें
* किसी भी वितरित सेटअप पर उस तदनुकूल प्रशिक्षण लूप को आसानी से चलाने के लिए 🤗 एक्सेलेरेट लाइब्रेरी का लाभ कैसे उठाएं
{:else}
* हब से एक बड़ा डेटासेट कैसे तैयार करें
* मॉडल को फाइन-ट्यून करने के लिए Keras का उपयोग कैसे करें
* पूर्वानुमान लगाने के लिए Keras का उपयोग कैसे करें
* कस्टम मीट्रिक का उपयोग कैसे करें
{/if}
हगिंग फेस हब पर अपनी प्रशिक्षित चौकियों को अपलोड करने के लिए, आपको एक huggingface.co खाते की आवश्यकता होगी: [खाता बनाएं](https://huggingface.co/join) | course/chapters/hi/chapter3/1.mdx/0 | {
"file_path": "course/chapters/hi/chapter3/1.mdx",
"repo_id": "course",
"token_count": 1359
} | 130 |
# Cosa fanno i Transformer?
<CourseFloatingBanner chapter={1}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/it/chapter1/section3.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/it/chapter1/section3.ipynb"},
]} />
In questa sezione, vedremo di cosa sono capaci i modelli Transformer e useremo il nostro primo strumento della libreria 🤗 Transformer: la funzione `pipeline()`.
<Tip>
👀 Lo vedi il pulsante <em>Open in Colab</em> in alto a destra? Cliccalo per aprire il blocco note Colab di Google che contiene tutti gli esempi di codice di questa sezione. Ritroverai il pulsante in ogni sezione che contiene esempi di codice.
Se intendi compilare gli esempi localmente, ti consigliamo di dare un occhio alla sezione <a href="/course/chapter0">setup</a>.
</Tip>
## I Transformer sono ovunque!
I modelli Transformer sono utilizzati per eseguire qualsiasi compito di NLP, come ad esempio quelli menzionati nelle sezioni precedenti. Ecco alcune delle aziende e organizzazioni che utilizzano Hugging Face e i modelli Transformer, e contribuiscono a loro volta alla comunità condividendo i propri modelli:
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/companies.PNG" alt="Companies using Hugging Face" width="100%">
La [libreria 🤗 Transformer](https://github.com/huggingface/transformers) fornisce la funzionalità per creare e utilizzare questi modelli condivisi. Il [Model Hub](https://huggingface.co/models) contiene migliaia di modelli pre-addestrati che possono essere scaricati e usati liberamente. Puoi anche caricare i tuoi modelli nell'Hub!
<Tip>
⚠️ L'Hugging Face Hub non si limita ai soli modelli Transformer. Chiunque può condividere qualsiasi tipo di modello o dataset (<em>insieme di dati</em>)! <a href="https://huggingface.co/join">Crea un profilo huggingface.co</a> per approfittare di tutte le funzioni disponibili!
</Tip>
Prima di scoprire come funzionino i modelli Transformer dietro le quinte, vediamo qualche esempio di come questi possano essere utilizzati per risolvere alcuni problemi interessanti di NLP.
## Lavorare con le pipeline
<Youtube id="tiZFewofSLM" />
L'oggetto più basilare della libreria 🤗 Transformer è la funzione `pipeline()`. Questa connette un modello con tutte le fasi necessarie di preprocessing e postprocessing, permettendoci così di fornire un qualsiasi testo come input diretto e ottenere una risposta intelligibile:
```python
from transformers import pipeline
classifier = pipeline("sentiment-analysis")
classifier("I've been waiting for a HuggingFace course my whole life.")
```
```python out
[{'label': 'POSITIVE', 'score': 0.9598047137260437}]
```
È anche possibile lavorare su più frasi!
```python
classifier(
["I've been waiting for a HuggingFace course my whole life.", "I hate this so much!"]
)
```
```python out
[{'label': 'POSITIVE', 'score': 0.9598047137260437},
{'label': 'NEGATIVE', 'score': 0.9994558095932007}]
```
Per default, questa pipeline seleziona un preciso modello pre-addestrato che è stato affinato per il sentiment analysis in inglese. Quando creiamo l'oggetto `classifier`, il modello viene scaricato e memorizzato nella cache. Se inizializziamo di nuovo il comando, verrà utilizzato il modello salvato nella cache e non ci sarà quindi bisogno di scaricare di nuovo il modello.
Tre passaggi principali sono coinvolti quando passiamo del testo in un pipeline:
1. Il testo è pre-elaborato in un formato che il modello può capire.
2. Gli input pre-elaborati vengono passati al modello.
3. Le previsioni del modello sono post-elaborate in un formato accessibile all'utilizzatore.
Tra le [pipeline disponibili](https://huggingface.co/transformers/main_classes/pipelines.html) al momento ci sono:
- `feature-extraction` (per ottenere la rappresentazione vettoriale di un testo)
- `fill-mask`
- `ner` (riconoscimento delle entità nominate, *named entity recognition*)
- `question-answering`
- `sentiment-analysis`
- `summarization`
- `text-generation`
- `translation`
- `zero-shot-classification`
Proviamo a vederne alcune!
## Classificazione zero-shot
Cominceremo con l'affrontare un compito impegnativo che consiste nella classificazione di testi non etichettati. Si tratta di uno scenario comune in molti progetti pratici perché l'annotazione testuale richiede tempo e competenza settoriale. In questo caso d'uso, la pipeline `zero-shot-classification` è molto potente e permette di specificare le etichette da utilizzare per la classificazione, in modo da non dover fare affidamento sulle etichette del modello pre-addestrato. Abbiamo già visto come il modello riesca a classificare una frase utilizzando le etichette 'positiva' e 'negativa', ma è anche possibile classificare testi utilizzando una qualsiasi serie di etichette di nostra scelta.
```python
from transformers import pipeline
classifier = pipeline("zero-shot-classification")
classifier(
"This is a course about the Transformers library",
candidate_labels=["education", "politics", "business"],
)
```
```python out
{'sequence': 'This is a course about the Transformers library',
'labels': ['education', 'business', 'politics'],
'scores': [0.8445963859558105, 0.111976258456707, 0.043427448719739914]}
```
Questa pipeline si chiama _zero-shot_ perché non hai bisogno di affinare il modello usando i tuoi dati per poterlo utilizzare. È direttamente in grado di generare una previsione probabilistica per qualsiasi lista di etichette tu voglia!
<Tip>
✏️ **Provaci anche tu!** Divertiti creando sequenze ed etichette e osserva come si comporta il modello.
</Tip>
## Generazione di testi
Vediamo ora come utilizzare la pipeline per generare testi. L'idea è di fornire un prompt (*richiesta*) che verrà auto-completato dal modello, il quale genererà il testo mancante. Si tratta di un compito simile alla funzione di scrittura facilitata che troviamo al giorno d'oggi in molti cellulari. La generazione di testi ha una componente casuale, per cui non essere sorpreso/a se non ottieni gli stessi risultati che mostriamo qui sotto.
```python
from transformers import pipeline
generator = pipeline("text-generation")
generator("In this course, we will teach you how to")
```
```python out
[{'generated_text': 'In this course, we will teach you how to understand and use '
'data flow and data interchange when handling user data. We '
'will be working with one or more of the most commonly used '
'data flows — data flows of various types, as seen by the '
'HTTP'}]
```
Usando l'argomento `num_return_sequences` puoi controllare quante sequenze diverse vengono generate e, con l'argomento `max_length`, la lunghezza totale dell'output testuale.
<Tip>
✏️ **Provaci anche tu!** Usa gli argomenti `num_return_sequences` e `max_length` per generare due frasi di 15 parole ciascuna.
</Tip>
## Utilizzo di un qualsiasi modello dell'Hub in una pipeline
Gli esempi precedenti utilizzavano il modello di default per il compito dato, ma puoi anche scegliere un modello particolare dell'Hub da utilizzare in una pipeline per un compito specifico, come ad esempio la generazione testuale. Vai al [Model Hub](https://huggingface.co/models) e clicca sull'etichetta corrispondente a destra, in modo da mostrare solo i modelli supportati per il compito in questione. Dovresti ritrovarti in una pagina come [questa](https://huggingface.co/models?pipeline_tag=text-generation).
Proviamo il modello [`distilgpt2`](https://huggingface.co/distilgpt2)! Ecco come caricarlo nella pipeline usata in precedenza:
```python
from transformers import pipeline
generator = pipeline("text-generation", model="distilgpt2")
generator(
"In this course, we will teach you how to",
max_length=30,
num_return_sequences=2,
)
```
```python out
[{'generated_text': 'In this course, we will teach you how to manipulate the world and '
'move your mental and physical capabilities to your advantage.'},
{'generated_text': 'In this course, we will teach you how to become an expert and '
'practice realtime, and with a hands on experience on both real '
'time and real'}]
```
Puoi affinare la ricerca di un modello cliccando sulle etichette corrispondenti alle lingue, e scegliere in seguito un modello che generi testo in un'altra lingua. Il Model Hub contiene anche checkpoint per modelli multilingue che supportano numerose lingue.
Quando avrai selezionato un modello cliccando su di esso, vedrai che esiste un widget che ti permette di provarlo direttamente online. In questo modo, puoi testare velocemente le capacità del modello prima di scaricarlo.
<Tip>
✏️ **Provaci anche tu!** Usa i filtri per trovare un modello di generazione testuale per un'altra lingua. Sentiti libero/a di divertirti con il widget e usalo in una pipeline!
</Tip>
### La Inference API
Tutti i modelli possono essere testati direttamente attraverso il tuo browser utilizzando l'Inference API che trovi nel [sito](https://huggingface.co/) di Hugging Face. Puoi divertirti con il modello direttamente in questa pagina, inserendo testo personalizzato e osservando come il modello processi i dati fornitigli.
La Inference API che alimenta il widget è disponibile anche come prodotto a pagamento, il che è comodo se ne hai bisogno per i tuoi flussi di lavoro. Vedi la [pagina dei prezzi](https://huggingface.co/pricing) per maggiori informazioni.
## Mask filling
La prossima pipeline che proverai è `fill-mask`. L'idea di questo compito è di completare gli spazi bianchi in un dato testo:
```python
from transformers import pipeline
unmasker = pipeline("fill-mask")
unmasker("This course will teach you all about <mask> models.", top_k=2)
```
```python out
[{'sequence': 'This course will teach you all about mathematical models.',
'score': 0.19619831442832947,
'token': 30412,
'token_str': ' mathematical'},
{'sequence': 'This course will teach you all about computational models.',
'score': 0.04052725434303284,
'token': 38163,
'token_str': ' computational'}]
```
L'argomento `top_k` gestisce il numero di possibilità che vuoi mostrare. Nota che qui il modello inserisce la parola speciale `<mask>`, spesso chiamata *mask token*. Altri modelli di tipo mask-filling potrebbero avere mask token diversi, quindi è sempre bene verificare quale sia la mask word corretta quando si esplorano nuovi modelli. Un modo per verificarla consiste nel trovare la mask word utilizzata nel widget.
<Tip>
✏️ **Provaci anche tu!** Cerca il modello `bert-base-cased` nell'Hub e identifica la sua mask word nel widget dell'Inference API. Cosa predice questo modello per la frase nel nostro esempio `pipeline` qui sopra?
</Tip>
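In alternativa (piccolo esempio indicativo), si può interrogare direttamente il tokenizer del checkpoint per scoprire la sua mask word:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
print(tokenizer.mask_token)  # per bert-base-cased la mask word è "[MASK]"
```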
## Riconoscimento delle entità nominate
Il riconoscimento delle entità nominate (*Named entity recognition*, NER) è un compito in cui il modello deve determinare quali parti dell'input testuale corrispondono a entità quali persone, località, o organizzazioni. Guardiamo a un esempio:
```python
from transformers import pipeline
ner = pipeline("ner", grouped_entities=True)
ner("My name is Sylvain and I work at Hugging Face in Brooklyn.")
```
```python out
[{'entity_group': 'PER', 'score': 0.99816, 'word': 'Sylvain', 'start': 11, 'end': 18},
{'entity_group': 'ORG', 'score': 0.97960, 'word': 'Hugging Face', 'start': 33, 'end': 45},
{'entity_group': 'LOC', 'score': 0.99321, 'word': 'Brooklyn', 'start': 49, 'end': 57}
]
```
Qui il modello ha correttamente identificato che Sylvain è una persona (PER), Hugging Face un'organizzazione (ORG), e Brooklyn una località (LOC).
Passiamo l'opzione `grouped_entities=True` nella funzione di creazione della pipeline per raggruppare le parti frasali che corrispondono alla stessa entità: qui il modello raggruppa correttamente "Hugging" e "Face" come singola organizzazione, nonostante il nome sia formato da più parole. A dire il vero, come vedremo nel prossimo capitolo, il preprocessing divide perfino alcune parole in parti più piccole. Ad esempio, `Sylvain` viene suddiviso in quattro parti: `S`, `##yl`, `##va`, and `##in`. Al momento del post-processing, la pipeline raggruppa le parti con successo.
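Per osservare direttamente queste parti di parole, si può ad esempio disattivare il raggruppamento (esempio indicativo):
```python
from transformers import pipeline
# Senza raggruppamento, ogni pezzo (ad es. "S", "##yl", "##va", "##in") compare come voce separata
ner = pipeline("ner", grouped_entities=False)
ner("My name is Sylvain and I work at Hugging Face in Brooklyn.")
```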
<Tip>
✏️ **Provaci anche tu!** Nel Model Hub, cerca un modello capace di effettuare part-of-speech tagging (comunemente abbreviato come POS) in inglese. Cosa predice il modello per la frase nell'esempio qui sopra?
</Tip>
## Risposta a domande
La pipeline `question-answering` risponde a domande utilizzando informazioni da un contesto prestabilito:
```python
from transformers import pipeline
question_answerer = pipeline("question-answering")
question_answerer(
question="Where do I work?",
context="My name is Sylvain and I work at Hugging Face in Brooklyn",
)
```
```python out
{'score': 0.6385916471481323, 'start': 33, 'end': 45, 'answer': 'Hugging Face'}
```
Nota che questa pipeline non genera risposte ma estrae informazioni da un contesto fornito.
## Riassunto
Quello del riassunto è un compito che trasforma un testo in un testo più breve, conservando tutti (o quasi) gli argomenti più importanti del testo di partenza. Ecco un esempio:
```python
from transformers import pipeline
summarizer = pipeline("summarization")
summarizer(
"""
America has changed dramatically during recent years. Not only has the number of
graduates in traditional engineering disciplines such as mechanical, civil,
electrical, chemical, and aeronautical engineering declined, but in most of
the premier American universities engineering curricula now concentrate on
and encourage largely the study of engineering science. As a result, there
are declining offerings in engineering subjects dealing with infrastructure,
the environment, and related issues, and greater concentration on high
technology subjects, largely supporting increasingly complex scientific
developments. While the latter is important, it should not be at the expense
of more traditional engineering.
Rapidly developing economies such as China and India, as well as other
industrial countries in Europe and Asia, continue to encourage and advance
the teaching of engineering. Both China and India, respectively, graduate
six and eight times as many traditional engineers as does the United States.
Other industrial countries at minimum maintain their output, while America
suffers an increasingly serious decline in the number of engineering graduates
and a lack of well-educated engineers.
"""
)
```
```python out
[{'summary_text': ' America has changed dramatically during recent years . The '
'number of engineering graduates in the U.S. has declined in '
'traditional engineering disciplines such as mechanical, civil '
', electrical, chemical, and aeronautical engineering . Rapidly '
'developing economies such as China and India, as well as other '
'industrial countries in Europe and Asia, continue to encourage '
'and advance engineering .'}]
```
Come nella generazione di testi, puoi specificare un `max_length` o `min_length` per il testo da generare.
## Traduzione
Per compiti di traduzione, puoi utilizzare un modello di default indicando la coppia linguistica nel nome del compito (come ad esempio `"translation_en_to_fr"`), anche se il metodo più semplice è di scegliere il modello che desideri utilizzare dal [Model Hub](https://huggingface.co/models). Qui in seguito traduciamo dal francese all'inglese:
```python
from transformers import pipeline
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-fr-en")
translator("Ce cours est produit par Hugging Face.")
```
```python out
[{'translation_text': 'This course is produced by Hugging Face.'}]
```
Come per le funzioni di generazione testuale e riassunto, è possibile specificare un `max_length` o un `min_length` per il risultato.
<Tip>
✏️ **Provaci anche tu!** Cerca modelli di traduzione in altre lingue e prova a tradurre la frase precedente in un paio di lingue diverse.
</Tip>
Finora abbiamo mostrato pipeline a solo scopo dimostrativo. Tali pipeline sono state programmate per compiti ben specifici e non sono in grado di eseguire variazioni di questi ultimi. Nel prossimo capitolo, imparerai cosa si nasconde dentro la funzione `pipeline()` e come personalizzarne il comportamento.
| course/chapters/it/chapter1/3.mdx/0 | {
"file_path": "course/chapters/it/chapter1/3.mdx",
"repo_id": "course",
"token_count": 5715
} | 131 |
<FrameworkSwitchCourse {fw} />
# Affinare il modello con la Trainer API
<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/it/chapter3/section3.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/it/chapter3/section3.ipynb"},
]} />
<Youtube id="nvBXf7s7vTI"/>
🤗 Transformers fornisce una classe `Trainer` (addestratore) per aiutare ad affinare sul proprio dataset uno qualsiasi dei modelli pre-addestrati che mette a disposizione. Dopo tutto il lavoro di preprocessing nella sezione precedente, rimangono giusto gli ultimi passi per definire il `Trainer`. Probabilmente la parte più complicata sarà preparare l'ambiente per eseguire `Trainer.train()`, poiché sarà molto lento su una CPU. Se non avete una GPU a disposizione, potete avere accesso gratuitamente a GPU o TPU su [Google Colab](https://colab.research.google.com/).
Gli esempi di codice qui sotto partono dal presupposto che gli esempi nella sezione precedente siano già stati eseguiti. Ecco un breve riassunto di cosa serve:
```py
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorWithPadding
raw_datasets = load_dataset("glue", "mrpc")
checkpoint = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
def tokenize_function(example):
return tokenizer(example["sentence1"], example["sentence2"], truncation=True)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
```
### Addestramento
Il primo passo per definire un `Trainer` è la definizione di una classe `TrainingArguments` che contenga tutti gli iperparametri che verranno usati dal `Trainer` per l'addestramento e la valutazione. L'unico parametro da fornire è la cartella dove verranno salvati il modello addestrato e i vari checkpoint. Per tutto il resto si possono lasciare i parametri di default, che dovrebbero funzionare bene per un affinamento di base.
```py
from transformers import TrainingArguments
training_args = TrainingArguments("test-trainer")
```
<Tip>
💡 Se si vuole caricare automaticamente il modello all'Hub durante l'addestramento, basta passare `push_to_hub=True` come parametro nei `TrainingArguments`. Maggiori dettagli verranno forniti nel [Capitolo 4](/course/chapter4/3).
</Tip>
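A titolo indicativo, basterebbe qualcosa come (il nome della cartella è arbitrario):
```py
training_args = TrainingArguments("test-trainer", push_to_hub=True)
```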
Il secondo passo è definire il modello. Come nel [capitolo precedente](/course/chapter2), utilizzeremo la classe `AutoModelForSequenceClassification` con due label:
```py
from transformers import AutoModelForSequenceClassification
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
```
Diversamente dal [Capitolo 2](/course/chapter2), un avviso di avvertimento verrà visualizzato dopo aver istanziato questo modello pre-addestrato. Ciò avviene perché BERT non è stato pre-addestrato per classificare coppie di frasi, quindi la testa del modello pre-addestrato viene scartata e una nuova testa adeguata per il compito di classificazione di sequenze è stata inserita. Gli avvertimenti indicano che alcuni pesi non verranno usati (quelli corrispondenti alla testa scartata del modello pre-addestrato) e che altri pesi sono stati inizializzati con valori casuali (quelli per la nuova testa). L'avvertimento viene concluso con un'esortazione ad addestrare il modello, che è esattamente ciò che stiamo per fare.
Una volta ottenuto il modello, si può definire un `Trainer` passandogli tutti gli oggetti costruiti fino ad adesso — il `model`, i `training_args`, i dataset di addestramento e validazione, il `data_collator`, e il `tokenizer`:
```py
from transformers import Trainer
trainer = Trainer(
model,
training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
)
```
Quando si passa l'argomento `tokenizer` come appena fatto, il `data_collator` usato di default dal `Trainer` sarà del tipo `DataCollatorWithPadding`, come definito precedentemente, quindi si potrebbe evitare di specificare l'argomento `data_collator=data_collator` in questa chiamata. Tuttavia era comunque importante mostrare questa parte del processing nella sezione 2!
Per affinare il modello sul nostro dataset, bisogna solo chiamare il metodo `train()` del `Trainer`:
```py
trainer.train()
```
Questo farà partire l'affinamento (che richiederà un paio di minuti su una GPU) e produrrà un report della funzione obiettivo dell'addestramento ogni 500 passi. Tuttavia, non vi farà sapere quanto sia buona (o cattiva) la performance del modello. Ciò è dovuto al fatto che:
1. Non è stato detto al `Trainer` di valutare il modello durante l'addestramento, settando `evaluation_strategy` o al valore `"steps"` (valuta il modello ogni `eval_steps`) oppure al valore `"epoch"` (valuta il modello alla fine di ogni epoca).
2. Non è stato fornito al `Trainer` una funzione `compute_metrics()` per calcolare le metriche di valutazione (altrimenti la valutazione stamperebbe solo il valore della funzione obiettivo, che non è un valore molto intuitivo).
### Valutazione
Vediamo come si può costruire una funzione `compute_metrics()` utile e usarla per il prossimo addestramento. La funzione deve prendere come parametro un oggetto `EvalPrediction` (che è una named tuple avente un campo `predictions` – predizioni – e un campo `label_ids` – id delle etichette –) e restituirà un dizionario che associa stringhe a numeri floating point (le stringhe saranno i nomi delle metriche, i numeri i loro valori). Per ottenere delle predizioni, si può usare il comando `Trainer.predict()`:
```py
predictions = trainer.predict(tokenized_datasets["validation"])
print(predictions.predictions.shape, predictions.label_ids.shape)
```
```python out
(408, 2) (408,)
```
Il risultato del metodo `predict()` è un'altra named tuple con tre campi: `predictions`, `label_ids`, e `metrics`. Il campo `metrics` conterrà solo il valore della funzione obiettivo sul dataset, in aggiunta ad alcune metriche legate al tempo (il tempo necessario per calcolare le predizioni, in totale e in media). Una volta completata la funzione `compute_metrics()` e passata al `Trainer`, quel campo conterrà anche le metriche restituite da `compute_metrics()`.
Come si può vedere, `predictions` è un array bi-dimensionale con dimensioni 408 x 2 (poiché 408 è il numero di elementi nel dataset). Questi sono i logit per ogni elemento del dataset passato a `predict()` (come già visto nel [capitolo precedente](/course/chapter2), tutti i modelli Transformer restituiscono logit). Per trasformarli in predizioni associabili alle etichette, bisogna prendere l'indice col valore massimo sul secondo asse:
```py
import numpy as np
preds = np.argmax(predictions.predictions, axis=-1)
```
Ora si possono paragonare i `preds` con le etichette. Per costruire la funzione `compute_metrics()`, verranno utilizzate le metriche della libreria 🤗 Datasets. Si possono caricare le metriche associate al dataset MRPC in maniera semplice, utilizzando la funzione `load_metric()`. L'oggetto restituito ha un metodo `compute()` (calcola) che possiamo usare per calcolare le metriche:
```py
from datasets import load_metric
metric = load_metric("glue", "mrpc")
metric.compute(predictions=preds, references=predictions.label_ids)
```
```python out
{'accuracy': 0.8578431372549019, 'f1': 0.8996539792387542}
```
L'esatto valore dei risultati potrebbe essere diverso nel vostro caso, a causa dell'inizializzazione casuale della testa del modello. In questo caso il nostro modello ha un'accuratezza dell'85.78% sul set di validazione e un valore F1 di 89.97. Queste sono le due metriche utilizzate per valutare i risultati sul dataset MRPC per il benchmark GLUE. La tabella nell'[articolo su BERT](https://arxiv.org/pdf/1810.04805.pdf) riportava un F1 di 88.9 per il modello base. Quello era il modello `uncased` (senza distinzione fra minuscole e maiuscole) mentre noi stiamo usando quello `cased`, il che spiega il risultato migliore.
Mettendo tutto insieme si ottiene la funzione `compute_metrics()`:
```py
def compute_metrics(eval_preds):
metric = load_metric("glue", "mrpc")
logits, labels = eval_preds
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
```
Per vederla in azione e fare il report delle metriche alla fine di ogni epoca, ecco come si definisce un nuovo `Trainer` che includa questa funzione `compute_metrics()`:
```py
training_args = TrainingArguments("test-trainer", evaluation_strategy="epoch")
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
trainer = Trainer(
model,
training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)
```
Da notare che bisogna creare un nuovo oggetto `TrainingArguments` con il valore di `evaluation_strategy` pari a `"epoch"` e un nuovo modello — altrimenti si continuerebbe l'addestramento del modello già addestrato. Per lanciare una nuova esecuzione dell'addestramento si usa:
```
trainer.train()
```
Stavolta vi sarà il report della funzione obiettivo di validazione alla fine di ogni epoca, in aggiunta alla funzione obiettivo dell'addestramento. Di nuovo, i valori esatti di accuratezza/F1 ottenuti da voi potrebbero variare leggermente da quelli mostrati qui a causa dell'inizializzazione casuale della testa del modello, ma dovrebbero essere comparabili.
Il `Trainer` funzionerà direttamente su svariate GPU e TPU e ha molte opzioni, tra cui addestramento in precisione mista (utilizzare `fp16 = True` negli argomenti). I dettagli delle opzioni verranno esplorati nel Capitolo 10.
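Ad esempio, uno sketch indicativo per attivare la precisione mista (gli altri argomenti restano quelli già visti; richiede una GPU):
```py
training_args = TrainingArguments("test-trainer", fp16=True)
```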
Qui si conclude l'introduzione all'affinamento usando l'API del `Trainer`. Esempi per i compiti più comuni in NLP verranno forniti nel Capitolo 7, ma per ora vediamo come ottenere la stessa cosa usando puramente Pytorch.
<Tip>
✏️ **Prova tu!** Affinare un modello sul dataset GLUE SST-2 utilizzando il processing dei dati già fatto nella sezione 2.
</Tip>
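Un possibile punto di partenza (sketch indicativo, non l'unica soluzione; riutilizza gli import già visti sopra): il sottoinsieme SST-2 ha una sola frase per esempio, quindi basta adattare la funzione di tokenizzazione.
```py
raw_datasets = load_dataset("glue", "sst2")
def tokenize_function(example):
    return tokenizer(example["sentence"], truncation=True)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
```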
| course/chapters/it/chapter3/3.mdx/0 | {
"file_path": "course/chapters/it/chapter3/3.mdx",
"repo_id": "course",
"token_count": 3703
} | 132 |
# イントロダクション
<CourseFloatingBanner
chapter={1}
classNames="absolute z-10 right-0 top-0"
/>
## 🤗 コースへようこそ!
<Youtube id="00GKzGyWFEs" />
このコースでは、[Hugging Face](https://huggingface.co/)のエコシステムを形成するライブラリである[🤗 Transformers](https://github.com/huggingface/transformers)、[🤗 Datasets](https://github.com/huggingface/datasets)、[🤗 Tokenizers](https://github.com/huggingface/tokenizers)、[🤗 Accelerate](https://github.com/huggingface/accelerate)、そして[Hugging Face Hub](https://huggingface.co/models)を使って自然言語処理(NLP)について学習することができます。このコースは、完全に無料で取り組むことができ、広告もありません。
## 何を学ぶことができるのか?
こちらがこのコースの概要になります:
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Brief overview of the chapters of the course.">
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Brief overview of the chapters of the course.">
</div>
- 第1章から第4章では、🤗 Transformersライブラリのメインコンセプトを紹介します。このパートを終える頃には、Transformerモデルがどのように機能するかを理解し、[Hugging Face Hub](https://huggingface.co/models)にあるモデルを利用し、データセットでfine-tuningを行い、その成果をHub上で共有する方法を身につけることができるでしょう!
- 第5章から第8章では、代表的なNLPタスクに取り掛かる前に、🤗 Datasetsと🤗 Tokenizersの基礎を学びます。このパートを終える頃には、大半のNLPの課題に自分で取り組むことができるようになります。
- 第9章から第12章では、NLPの範囲にとどまらず、音声処理とコンピュータビジョンのタスクにTransformerモデルをどのように適用できるかを検討します。その過程で、モデルのデモを作成して共有することや本番環境用にモデルを最適化する方法を学ぶことができます。このパートを終える頃には、🤗 Transformersを(ほとんど)全ての機械学習の問題に適用する知識が身についていることでしょう!
このコースでは:
* Pythonの知識が必要です
* コースに取り組む前に、深層学習の入門コースである[fast.ai](https://www.fast.ai/)による [Practical Deep Learning for Coders](https://course.fast.ai/)や[DeepLearning.AI](https://www.deeplearning.ai/)が開発したプログラムなどを受講した方がよいでしょう
* [PyTorch](https://pytorch.org/)や[TensorFlow](https://www.tensorflow.org/)の事前知識は必須ではありませんが、どちらかに精通していると理解がより促進されるでしょう
このコースを修了した後は、DeepLearning.AIの[Natural Language Processing Specialization](https://www.coursera.org/specializations/natural-language-processing?utm_source=deeplearning-ai&utm_medium=institutions&utm_campaign=20211011-nlp-2-hugging_face-page-nlp-refresh)をご覧いただくことをお勧めします。ナイーブベイズやLSTMなどの従来のNLPモデルを幅広くカバーしており、これらも理解しておいて損はありませんよ!
## 私たちについて
筆者のプロフィール:
**Matthew Carrigan**はHugging Faceの機械学習エンジニアです。アイルランドのダブリンに住んでおり、以前はParse.lyで機械学習エンジニアとして、それ以前はトリニティ・カレッジ・ダブリンでポスドク研究員として働いていました。彼は、既存のアーキテクチャを拡張することでAGI(汎用人工知能)に到達できるとは思っていませんが、ロボットによる不死には大きな期待を寄せています。
**Lysandre Debut**はHugging Faceの機械学習エンジニアで、かなり初期の開発段階から🤗 Transformersライブラリに携わってきました。彼の目標は、非常にシンプルなAPIのツールを開発することによって、誰もがNLPにアクセスできるようにすることです。
**Sylvain Gugger**はHugging Faceのリサーチエンジニアで、🤗 Transformersライブラリのコアメンテナーの1人です。以前は、fast.aiのリサーチサイエンティストで、Jeremy Howard氏と[Deep Learning for Coders with fastai and PyTorch](https://learning.oreilly.com/library/view/deep-learning-for/9781492045519/)を共同執筆しています。限られたリソースでモデルを高速に学習させる技術を設計・改善することで、深層学習をより身近なものにすることに研究の焦点を置いています。
**Merve Noyan**はHugging Faceのデベロッパーアドボケイトであり、誰もが機械学習に取り組めるようなツールの開発とその周辺のコンテンツ作成に取り組んでいます。
**Lucile Saulnier**はHugging Faceの機械学習エンジニアで、オープンソースツールの開発および利用のサポートを行っています。また、共同でのモデルの学習やBigScienceなど、自然言語処理の分野で多くの研究プロジェクトに積極的に参加しています。
**Lewis Tunstall**はHugging Faceの機械学習エンジニアで、オープンソースツールの開発とより広いコミュニティで利用されるようにすることに注力しています。また、[オライリー出版のTransformersに関する本](https://www.oreilly.com/library/view/natural-language-processing/9781098136789/)の著者の1人です。
**Leandro von Werra**はHugging Faceのオープンソースチームの機械学習エンジニアであり、[オライリー出版のTransformersに関する本](https://www.oreilly.com/library/view/natural-language-processing/9781098136789/)の著者の1人です。機械学習全般に関わり、NLPプロジェクトを実運用に移行する経験をこの業界で数年積んでいます。
準備はできていますか?この章では、以下のことを学びます:
* `pipeline()`機能を使ったテキスト生成や分類などNLPタスクの取り組み方
* Transformerのアーキテクチャについて
* エンコーダ、デコーダ、エンコーダ・デコーダのアーキテクチャとユースケースの見分け方 | course/chapters/ja/chapter1/1.mdx/0 | {
"file_path": "course/chapters/ja/chapter1/1.mdx",
"repo_id": "course",
"token_count": 2873
} | 133 |
<FrameworkSwitchCourse {fw} />
# 学習済みモデルを使う
{#if fw === 'pt'}
<CourseFloatingBanner chapter={4}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/ja/chapter4/section2_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/ja/chapter4/section2_pt.ipynb"},
]} />
{:else}
<CourseFloatingBanner chapter={4}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/ja/chapter4/section2_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/ja/chapter4/section2_tf.ipynb"},
]} />
{/if}
モデルハブは適切なモデルを簡単に選択できるようにし、どのライブラリからでも数行のコードで使用できるようにします。では、実際にこれらのモデルをどのように使用し、どのようにコミュニティに貢献するかを見ていきましょう。
例えば、マスクフィルを行えるフランス語のモデルを探しているとします。
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/camembert.gif" alt="Selecting the Camembert model." width="80%"/>
</div>
試しに`camembert-base`チェックポイントを選択してみましょう。`camembert-base`という識別子があれば、すぐに使い始めることができます。これまでの章で見てきたように、`pipeline()` 関数を使用してインスタンスを作成することができます:
```py
from transformers import pipeline
camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
results = camembert_fill_mask("Le camembert est <mask> :)")
```
```python out
[
{'sequence': 'Le camembert est délicieux :)', 'score': 0.49091005325317383, 'token': 7200, 'token_str': 'délicieux'},
{'sequence': 'Le camembert est excellent :)', 'score': 0.1055697426199913, 'token': 2183, 'token_str': 'excellent'},
{'sequence': 'Le camembert est succulent :)', 'score': 0.03453313186764717, 'token': 26202, 'token_str': 'succulent'},
{'sequence': 'Le camembert est meilleur :)', 'score': 0.0330314114689827, 'token': 528, 'token_str': 'meilleur'},
{'sequence': 'Le camembert est parfait :)', 'score': 0.03007650189101696, 'token': 1654, 'token_str': 'parfait'}
]
```
ご覧の通り、パイプライン内でのモデルのロードは非常に簡単です。唯一気をつけなければならないのは、選択したチェックポイントが使用するタスクに適しているかということです。例えば、ここでは`camembert-base`というチェックポイントを`fill-mask`というパイプラインでロードしていますが、これは全く問題ありません。しかし、このチェックポイントを`text-classification`パイプラインでロードしたとすると、`camembert-base`の「ヘッド」がこのタスクに適していないため、結果が意味をなさないことになります!適切なチェックポイントを選択するために、ハギングフェイスハブインタフェースにあるタスクセレクタを使用することをお勧めします:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/tasks.png" alt="The task selector on the web interface." width="80%"/>
</div>
また、モデル・アーキテクチャを直接使用して、チェックポイントをインスタンス化することもできます:
{#if fw === 'pt'}
```py
from transformers import CamembertTokenizer, CamembertForMaskedLM
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
```
しかし、代わりに[`Auto*` classes](https://huggingface.co/transformers/model_doc/auto.html?highlight=auto#auto-classes)を使用することをお勧めします。これらは設計上、(モデル)アーキテクチャに依存しないためです。先ほどのコードサンプルでは、CamemBERT アーキテクチャでロード可能なチェックポイントに限定していましたが、 `Auto*`クラスを使用すると、チェックポイントを簡単に切り替えることができます:
```py
from transformers import AutoTokenizer, AutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("camembert-base")
model = AutoModelForMaskedLM.from_pretrained("camembert-base")
```
{:else}
```py
from transformers import CamembertTokenizer, TFCamembertForMaskedLM
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = TFCamembertForMaskedLM.from_pretrained("camembert-base")
```
しかし、代わりに[`TFAuto*` classes](https://huggingface.co/transformers/model_doc/auto.html?highlight=auto#auto-classes)を使用することをお勧めします。これらは設計上、アーキテクチャに依存しないためです。先ほどのコードサンプルでは、CamemBERT アーキテクチャでロード可能なチェックポイントに限定していましたが、 `TFAuto*`クラスを使用すると、チェックポイントを簡単に切り替えることができます:
```py
from transformers import AutoTokenizer, TFAutoModelForMaskedLM
tokenizer = AutoTokenizer.from_pretrained("camembert-base")
model = TFAutoModelForMaskedLM.from_pretrained("camembert-base")
```
{/if}
<Tip>
学習済みのモデルを使う場合は、どのように学習したのか、どのデータセットで学習したのか、その限界と偏りを必ず確認すること。これらの情報はすべて、モデルカードに記載されています。
</Tip>
| course/chapters/ja/chapter4/2.mdx/0 | {
"file_path": "course/chapters/ja/chapter4/2.mdx",
"repo_id": "course",
"token_count": 2401
} | 134 |
# パート2終了!
<CourseFloatingBanner
chapter={8}
classNames="absolute z-10 right-0 top-0"
/>
お疲れ様でした。第2部を無事に完了しました!現在は第3部を作成中ですので、最新情報を見逃さないように我々の[ニュースレター](https://huggingface.curated.co/)に登録して下さい!
これで、様々なNLPタスクに取り組み、その上でモデルを微調整したり事前学習したりできるようになったはずです!その結果を[モデルハブ](https://huggingface.co/models)でコミュニティと共有することを忘れないでください。
皆さんがこのコースのおかげで得た知識で何を作るのか、楽しみです!
"file_path": "course/chapters/ja/chapter8/6.mdx",
"repo_id": "course",
"token_count": 345
} | 135 |
<FrameworkSwitchCourse {fw} />
# Models
{#if fw === 'pt'}
<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/chapter2/section3_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/chapter2/section3_pt.ipynb"},
]} />
{:else}
<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/chapter2/section3_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/chapter2/section3_tf.ipynb"},
]} />
{/if}
{#if fw === 'pt'}
<Youtube id="AhChOFRegn4"/>
{:else}
<Youtube id="d3JVgghSOew"/>
{/if}
{#if fw === 'pt'}
이번 섹션에서는 모델을 생성하고 사용하는 방법에 대해 자세히 알아보겠습니다. 체크포인트에서 모델을 인스턴스화하는 데 유용한 `AutoModel` 클래스를 사용할 것입니다.
이 `AutoModel` 클래스와 관련 클래스들은 실제로 라이브러리에 있는 다양한 모델들을 감싸고 있는 간단한 래퍼입니다. 이 래퍼는 체크포인트에 적합한 모델 아키텍처를 자동으로 추측하고, 이 아키텍처를 가진 모델을 인스턴스화하는 것도 똑똑하게 처리합니다.
{:else}
이번 섹션에서는 모델을 생성하고 사용하는 방법에 대해 자세히 알아보겠습니다. 체크포인트에서 모델을 인스턴스화하는 데 유용한 `TFAutoModel` 클래스를 사용할 것입니다.
이 `TFAutoModel` 클래스와 관련 클래스들은 실제로 라이브러리에 있는 다양한 모델들을 감싸고 있는 간단한 래퍼입니다. 이 래퍼는 체크포인트에 적합한 모델 아키텍처를 자동으로 추측하고, 이 아키텍처를 가진 모델을 인스턴스화하는 것도 똑똑하게 처리합니다.
{/if}
하지만, 만약 모델의 아키텍처를 직접 정의하고 싶다면, 해당 모델의 클래스를 사용할 수 있습니다. BERT 모델을 예로 들어보겠습니다.
## Creating a Transformer (Transformer 생성하기)
BERT 모델을 초기화하기 위해서는 먼저 모델의 환경설정을 로드해야 합니다.
{#if fw === 'pt'}
```py
from transformers import BertConfig, BertModel
# Building the config
config = BertConfig()
# Building the model from the config
model = BertModel(config)
```
{:else}
```py
from transformers import BertConfig, TFBertModel
# Building the config
config = BertConfig()
# Building the model from the config
model = TFBertModel(config)
```
{/if}
이 환경설정은 모델을 구축하는데 사용되는 많은 속성들을 포함하고 있습니다:
```py
print(config)
```
```python out
BertConfig {
[...]
"hidden_size": 768,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
[...]
}
```
아직 이 속성들이 무엇을 의미하는지는 모르겠지만, 몇몇은 익숙할 것입니다: `hidden_size` 속성은 `hidden_states` 벡터의 크기를 정의하고, `num_hidden_layers`는 Transformer 모델이 가지고 있는 레이어의 수를 정의합니다.
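참고로, 설정 객체의 속성 값을 직접 지정해서 기본값과 다른 (예를 들어 더 작은) 아키텍처를 정의할 수도 있습니다. 아래는 임의의 값을 가정한 간단한 예시입니다:

```py
from transformers import BertConfig

# 기본값 대신 더 작은 아키텍처를 정의합니다 (값은 예시입니다)
config = BertConfig(num_hidden_layers=6, hidden_size=384, num_attention_heads=6)
print(config.num_hidden_layers)
```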
### Different loading methods (다른 로딩 방법)
무작위 값을 통한 기본 환경설정으로 모델 생성
{#if fw === 'pt'}
```py
from transformers import BertConfig, BertModel
config = BertConfig()
model = BertModel(config)
# Model is randomly initialized!
```
{:else}
```py
from transformers import BertConfig, TFBertModel
config = BertConfig()
model = TFBertModel(config)
# Model is randomly initialized!
```
{/if}
이 모델은 무작위로 초기화되어 있기 때문에, 아직은 아무런 유용한 정보를 포함하고 있지 않습니다. 이 모델을 훈련시키기 위해서는, 먼저 훈련 데이터를 준비해야 합니다. 바닥부터 직접 학습할 수도 있지만, [Chapter 1](/course/chapter1)에서 확인했듯이 이 과정은 많은 시간과 많은 데이터가 필요하며, 환경에도 무시할 수 없는 영향을 미칩니다.
이러한 불필요한 중복된 노력을 피하기 위해서, 이미 훈련된 모델을 공유하고 재사용할 수 있어야 합니다.
훈련된 Transformer 모델을 불러오는 것은 매우 간단합니다 - `from_pretrained` 메소드를 사용하면 됩니다.
{#if fw === 'pt'}
```py
from transformers import BertModel
model = BertModel.from_pretrained("bert-base-cased")
```
이전에 봤듯이, `BertModel` 대신 `AutoModel` 클래스를 사용할 수도 있습니다. 이제부터는 체크포인트에 독립적인 코드를 생성하기 위해 `AutoModel`를 사용하겠습니다. 만약 코드가 한 체크포인트에서 잘 동작한다면, 다른 체크포인트에서도 잘 동작해야 합니다. 이는 체크포인트의 아키텍처가 다르더라도, 비슷한 작업(예를 들어, 감성 분석 작업)을 위해 훈련된 경우에도 적용됩니다.
{:else}
```py
from transformers import TFBertModel
model = TFBertModel.from_pretrained("bert-base-cased")
```
이전에 봤듯이, `TFBertModel` 대신 `TFAutoModel` 클래스를 사용할 수도 있습니다. 이제부터는 체크포인트에 독립적인 코드를 생성하기 위해 `TFAutoModel`를 사용하겠습니다. 만약 코드가 한 체크포인트에서 잘 동작한다면, 다른 체크포인트에서도 잘 동작해야 합니다. 이는 체크포인트의 아키텍처가 다르더라도, 비슷한 작업(예를 들어, 감성 분석 작업)을 위해 훈련된 경우에도 적용됩니다.
{/if}
이 코드 샘플에서는 `BertConfig`를 사용하지 않았고, 대신 `bert-base-cased` 식별자를 통해 사전 훈련된 모델을 불러왔습니다. 이는 BERT의 저자들이 직접 훈련시킨 체크포인트입니다. 자세한 내용은 [모델 카드](https://huggingface.co/bert-base-cased)에서 확인할 수 있습니다.
이 모델은 체크포인트의 모든 가중치로 초기화되었습니다. 이 모델은 체크포인트에서 훈련된 작업에 대해 직접 추론에 사용할 수 있으며, 새로운 작업에 대해 미세 조정할 수도 있습니다. 사전 훈련된 가중치로부터 학습을 진행하면, 빈 상태에서 훈련을 시작하는 것보다 빠르게 좋은 결과를 얻을 수 있습니다.
모델의 가중치는 `from_pretrained()` 메서드를 호출할 때 다운로드되어 캐시에 저장됩니다(따라서 이후에 `from_pretrained()` 메서드를 다시 호출해도 다시 다운로드하지 않습니다). 캐시 폴더는 기본적으로 *~/.cache/huggingface/transformers*입니다. 캐시 폴더를 사용자 정의하려면 `HF_HOME` 환경 변수를 설정하면 됩니다.
모델을 불러오는 식별자는 BERT 아키텍처와 호환되는 경우 모델 허브의 모든 모델의 식별자가 될 수 있습니다. BERT 체크포인트의 전체 목록은 [여기](https://huggingface.co/models?filter=bert)에서 확인할 수 있습니다.
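다음은 캐시 폴더를 바꾸는 간단한 예시입니다 (경로는 임의로 가정한 값이며, 라이브러리를 import 하기 전에 설정해야 합니다):

```py
import os

# 🤗 Transformers를 import 하기 전에 캐시 위치를 지정합니다 (경로는 예시입니다)
os.environ["HF_HOME"] = "/path/to/my/cache"
```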
### Saving methods (저장 방법)
모델을 저장하는 방법은 불러오는 방법처럼 쉽습니다. `save_pretrained()` 메서드를 사용하면 됩니다. 이 메서드는 `from_pretrained()` 메서드와 유사합니다.
```py
model.save_pretrained("directory_on_my_computer")
```
이는 2가지 파일을 저장하게 됩니다:
{#if fw === 'pt'}
```
ls directory_on_my_computer
config.json pytorch_model.bin
```
{:else}
```
ls directory_on_my_computer
config.json tf_model.h5
```
{/if}
*config.json* 파일은 모델 아키텍처를 구축하는 데 필요한 속성을 알려줍니다. 이 파일에는 체크포인트가 어디에서 생성되었는지, 마지막으로 체크포인트를 저장할 때 사용한 🤗 Transformers 버전 등의 메타데이터도 포함되어 있습니다.
{#if fw === 'pt'}
The *pytorch_model.bin* file is known as the *state dictionary*; it contains all your model's weights. The two files go hand in hand; the configuration is necessary to know your model's architecture, while the model weights are your model's parameters.
{:else}
The *tf_model.h5* file is known as the *state dictionary*; it contains all your model's weights. The two files go hand in hand; the configuration is necessary to know your model's architecture, while the model weights are your model's parameters.
{/if}
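저장한 모델은 저장할 때 사용한 디렉토리 경로를 `from_pretrained()`에 전달해서 다시 불러올 수 있습니다. 아래는 간단한 예시입니다 (PyTorch의 `AutoModel`을 가정했으며, TensorFlow에서는 `TFAutoModel`을 사용하면 됩니다):

```py
from transformers import AutoModel

# 방금 저장한 디렉토리에서 모델을 다시 불러옵니다
model = AutoModel.from_pretrained("directory_on_my_computer")
```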
## Using a Transformer model for inference (Transformer 모델을 추론에 사용하기)
이제 모델을 불러오고 저장하는 방법을 알았으니, 모델을 사용하여 예측을 만들어 보겠습니다. Transformer 모델은 토크나이저가 생성하는 숫자만 처리할 수 있습니다. 그러나 토크나이저에 대해 논의하기 전에 모델이 받는 입력에 대해 알아보겠습니다.
토크나이저는 입력을 적절한 프레임워크의 텐서로 변환할 수 있지만, 이해도를 높이기 위해 모델에 입력을 보내기 전 무엇을 반드시 해야 하는지 간단히 살펴보겠습니다.
우리가 여러 시퀀스들이 있다고 가정해 봅시다:
```py
sequences = ["Hello!", "Cool.", "Nice!"]
```
토크나이저는 이를 단어 인덱스로 변환합니다. 이를 *input IDs*라고 합니다. 각 시퀀스는 이제 숫자 목록입니다! 결과는 다음과 같습니다:
```py no-format
encoded_sequences = [
[101, 7592, 999, 102],
[101, 4658, 1012, 102],
[101, 3835, 999, 102],
]
```
이는 인코딩된 시퀀스의 목록, 즉 리스트의 리스트입니다. 텐서는 직사각형 모양만 받을 수 있습니다 (행렬을 생각해 보세요). 이 "배열"은 이미 직사각형 모양이므로 텐서로 변환하는 것은 쉽습니다:
{#if fw === 'pt'}
```py
import torch
model_inputs = torch.tensor(encoded_sequences)
```
{:else}
```py
import tensorflow as tf
model_inputs = tf.constant(encoded_sequences)
```
{/if}
### Using the tensors as inputs to the model (텐서를 모델의 입력으로 사용하기)
모델의 텐서를 사용하는 것은 매우 간단합니다. 모델에 입력을 넣기만 하면 됩니다:
```py
output = model(model_inputs)
```
모델은 다양한 인자를 받을 수 있지만, 반드시 필요한 것은 input IDs뿐입니다. 나머지 인자들이 언제 필요한지, 어떤 역할을 하는지는 나중에 설명하겠습니다. 먼저 토크나이저에 대해 좀 더 자세히 알아보겠습니다.
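참고로, 위에서 얻은 `output`의 마지막 은닉 상태(last hidden state)의 크기를 확인해 보면 모델이 어떤 형태의 값을 돌려주는지 감을 잡을 수 있습니다. 아래는 간단한 확인 예시입니다:

```py
# (배치 크기, 시퀀스 길이, hidden_size) 형태의 텐서가 출력됩니다
print(output.last_hidden_state.shape)
```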
"file_path": "course/chapters/ko/chapter2/3.mdx",
"repo_id": "course",
"token_count": 7315
} | 136 |
- title: 0. Configuração
sections:
- local: chapter0/1
title: Introdução
- title: 1. Modelos de Transformers
sections:
- local: chapter1/1
title: Introdução
- local: chapter1/2
title: Processamento de Linguagem Natural
- local: chapter1/3
title: Transformers, o que eles podem fazer?
- local: chapter1/4
title: Como os Transformers trabalham?
- local: chapter1/5
title: Modelos decodificadores
- local: chapter1/6
title: Modelos codificadores
- local: chapter1/7
title: Modelos sequência a sequência
- local: chapter1/8
title: Vieses e limitações
- local: chapter1/9
title: Resumo
- local: chapter1/10
title: Questionário de fim de capítulo
quiz: 1
- title: 2. Usando 🤗 Transformers
sections:
- local: chapter2/1
title: Introdução
- local: chapter2/2
title: Por dentro da função pipeline
- local: chapter2/3
title: Modelos
- local: chapter2/4
title: Tokenizers
- local: chapter2/5
title: Tratando sequências múltiplas
- local: chapter2/6
title: Colocando tudo junto
- local: chapter2/7
title: Uso básico concluído!
- local: chapter2/8
title: Questionário de fim de capítulo
quiz: 2
- title: 3. Ajustando um modelo pré treinado
sections:
- local: chapter3/1
title: Introdução
- title: 4. Compartilhamento de modelos e tokenizer
sections:
- local: chapter4/1
title: O Hugging Face Hub
- local: chapter4/2
title: Usando modelos pré-treinados
- local: chapter4/3
title: Compartilhando modelos pré-treinados
- local: chapter4/4
title: Construindo um cartão para o modelo
- local: chapter4/5
title: Parte 1 completa!
- local: chapter4/6
title: Questionário de fim de capítulo
quiz: 4
- title: 5. A biblioteca Datasets 🤗
sections:
- local: chapter5/1
title: Introdução
- local: chapter5/2
title: E se o meu dataset não estiver no Hub?
- local: chapter5/3
title: Hora de fatiar e dividir os dados
- local: chapter5/4
title: Big data? 🤗 Datasets ao resgate
- local: chapter5/5
title: Criando seu próprio dataset
- local: chapter5/6
title: Busca semântica com o FAISS
- local: chapter5/7
title: Confira o 🤗 Datasets!
- local: chapter5/8
title: Questionário de fim de capítulo
quiz: 5
- title: 6. A biblioteca Tokenizers 🤗
sections:
- local: chapter6/1
title: Introdução
- local: chapter6/2
title: Treinando um novo tokenizador
- local: chapter6/3
title: Os poderes especiais dos tokenizadores rápidos
- title: 7. Principais tarefas NLP
sections:
- local: chapter7/1
title: Introdução
- title: 8. Como pedir ajuda 🤗
sections:
- local: chapter8/1
title: Introdução
- local: chapter8/2
title: O que fazer quando ocorrer um erro
- local: chapter8/3
title: Pedindo ajuda nos fóruns
- title: Evento do curso
sections:
- local: events/2
title: Evento de lançamento da Parte 2
| course/chapters/pt/_toctree.yml/0 | {
"file_path": "course/chapters/pt/_toctree.yml",
"repo_id": "course",
"token_count": 1197
} | 137 |
<FrameworkSwitchCourse {fw} />
# Tratando sequências múltiplas
{#if fw === 'pt'}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/pt/chapter2/section5_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/pt/chapter2/section5_pt.ipynb"},
]} />
{:else}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/pt/chapter2/section5_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/pt/chapter2/section5_tf.ipynb"},
]} />
{/if}
{#if fw === 'pt'}
<Youtube id="M6adb1j2jPI"/>
{:else}
<Youtube id="ROxrFOEbsQE"/>
{/if}
Na seção anterior, exploramos os casos mais simples de uso: fazer inferência sobre uma única sequência de pequeno comprimento. No entanto, surgem algumas questões:
- Como nós tratamos diversas sequências?
- Como nós tratamos diversas sequências *de diferentes tamanhos*?
- Os índices de vocabulário são as únicas entradas que permitem que um modelo funcione bem?
- Existe uma sequência muito longa?
Vamos ver que tipos de problemas estas questões colocam, e como podemos resolvê-los usando a API do 🤗 Transformers.
## Modelos esperam um batch de entradas
No exercício anterior, você viu como as sequências são traduzidas em listas de números. Vamos converter esta lista de números em um tensor e enviá-la para o modelo:
{#if fw === 'pt'}
```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.tensor(ids)
# This line will fail.
model(input_ids)
```
```python out
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
```
{:else}
```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = tf.constant(ids)
# This line will fail.
model(input_ids)
```
```py out
InvalidArgumentError: Input to reshape is a tensor with 14 values, but the requested shape has 196 [Op:Reshape]
```
{/if}
Oh não! Por que isso falhou? Afinal, seguimos os passos do pipeline na seção 2.
O problema é que enviamos uma única sequência para o modelo, enquanto que os 🤗 transformers esperam várias sentenças por padrão. Aqui tentamos fazer tudo o que o tokenizer fez nos bastidores quando o aplicamos a uma `sequência`, mas se você olhar com atenção, verá que ele não apenas converteu a lista de IDs de entrada em um tensor, mas acrescentou uma dimensão em cima dele:
{#if fw === 'pt'}
```py
tokenized_inputs = tokenizer(sequence, return_tensors="pt")
print(tokenized_inputs["input_ids"])
```
```python out
tensor([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172,
2607, 2026, 2878, 2166, 1012, 102]])
```
{:else}
```py
tokenized_inputs = tokenizer(sequence, return_tensors="tf")
print(tokenized_inputs["input_ids"])
```
```py out
<tf.Tensor: shape=(1, 16), dtype=int32, numpy=
array([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662,
12172, 2607, 2026, 2878, 2166, 1012, 102]], dtype=int32)>
```
{/if}
Vamos tentar novamente e acrescentar uma nova dimensão:
{#if fw === 'pt'}
```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.tensor([ids])
print("Input IDs:", input_ids)
output = model(input_ids)
print("Logits:", output.logits)
```
{:else}
```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = tf.constant([ids])
print("Input IDs:", input_ids)
output = model(input_ids)
print("Logits:", output.logits)
```
{/if}
Printamos os IDs de entrada assim como os logits resultantes - aqui está a saída:
{#if fw === 'pt'}
```python out
Input IDs: [[ 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012]]
Logits: [[-2.7276, 2.8789]]
```
{:else}
```py out
Input IDs: tf.Tensor(
[[ 1045 1005 2310 2042 3403 2005 1037 17662 12172 2607 2026 2878
2166 1012]], shape=(1, 14), dtype=int32)
Logits: tf.Tensor([[-2.7276208 2.8789377]], shape=(1, 2), dtype=float32)
```
{/if}
*Batching* é o ato de enviar múltiplas sentenças através do modelo, todas de uma só vez. Se você tiver apenas uma frase, você pode apenas construir um lote com uma única sequência:
```
batched_ids = [ids, ids]
```
Este é um lote de duas sequências idênticas!
<Tip>
✏️ **Experimente!** Converta esta lista de `batched_ids` em um tensor e passe-a através de seu modelo. Verifique se você obtém os mesmos logits que antes (mas duas vezes)!
</Tip>
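Um possível esboço de solução (em PyTorch, supondo que o `model` e os `ids` definidos acima ainda estejam disponíveis):

```py
import torch

batched_ids = [ids, ids]
batched_input = torch.tensor(batched_ids)
output = model(batched_input)
print(output.logits)  # duas linhas com os mesmos logits
```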
O Batching permite que o modelo funcione quando você o alimenta com várias frases. Usar várias sequências é tão simples quanto construir um lote com uma única sequência. Há uma segunda questão, no entanto. Quando você está tentando agrupar duas (ou mais) sentenças, elas podem ser de comprimentos diferentes. Se você já trabalhou com tensores antes, você sabe que eles precisam ser de forma retangular, então você não será capaz de converter a lista de IDs de entrada em um tensor diretamente. Para contornar este problema, normalmente realizamos uma *padronização* (padding) nas entradas.
## Realizando padding nas entradas
A seguinte lista de listas não pode ser convertida em um tensor:
```py no-format
batched_ids = [
[200, 200, 200],
[200, 200]
]
```
Para contornar isso, usaremos *padding* para fazer com que nossos tensores tenham uma forma retangular. O padding garante que todas as nossas frases tenham o mesmo comprimento, acrescentando uma palavra especial chamada *padding token* às frases com menos valores. Por exemplo, se você tiver 10 frases com 10 palavras e 1 frase com 20 palavras, o padding garantirá que todas as frases tenham 20 palavras. Em nosso exemplo, o tensor resultante se parece com isto:
```py no-format
padding_id = 100
batched_ids = [
[200, 200, 200],
[200, 200, padding_id],
]
```
O ID do token de padding pode ser encontrado em `tokenizer.pad_token_id`. Vamos utilizá-lo e enviar nossas duas frases através do modelo individualmente e agrupadas em batches:
{#if fw === 'pt'}
```py no-format
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence1_ids = [[200, 200, 200]]
sequence2_ids = [[200, 200]]
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
print(model(torch.tensor(sequence1_ids)).logits)
print(model(torch.tensor(sequence2_ids)).logits)
print(model(torch.tensor(batched_ids)).logits)
```
```python out
tensor([[ 1.5694, -1.3895]], grad_fn=<AddmmBackward>)
tensor([[ 0.5803, -0.4125]], grad_fn=<AddmmBackward>)
tensor([[ 1.5694, -1.3895],
[ 1.3373, -1.2163]], grad_fn=<AddmmBackward>)
```
{:else}
```py no-format
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence1_ids = [[200, 200, 200]]
sequence2_ids = [[200, 200]]
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
print(model(tf.constant(sequence1_ids)).logits)
print(model(tf.constant(sequence2_ids)).logits)
print(model(tf.constant(batched_ids)).logits)
```
```py out
tf.Tensor([[ 1.5693678 -1.3894581]], shape=(1, 2), dtype=float32)
tf.Tensor([[ 0.5803005 -0.41252428]], shape=(1, 2), dtype=float32)
tf.Tensor(
[[ 1.5693681 -1.3894582]
[ 1.3373486 -1.2163193]], shape=(2, 2), dtype=float32)
```
{/if}
Há algo errado com os logits em nossas predições em batches: a segunda fileira deveria ser a mesma que os logits para a segunda frase, mas temos valores completamente diferentes!
Isto porque a característica chave dos Transformer são as camadas de atenção que *contextualizam* cada token. Estes levarão em conta os tokens de padding, uma vez que atendem a todos os tokens de uma sequência. Para obter o mesmo resultado ao passar frases individuais de diferentes comprimentos pelo modelo ou ao passar um batch com as mesmas frases e os paddings aplicados, precisamos dizer a essas camadas de atenção para ignorar os tokens de padding. Isto é feito com o uso de uma máscara de atenção (*attention mask*).
## Attention masks
*Attention masks* são tensores com a mesma forma exata do tensor de IDs de entrada, preenchidos com 0s e 1s: 1s indicam que os tokens correspondentes devem ser atendidas, e 0s indicam que os tokens correspondentes não devem ser atendidas (ou seja, devem ser ignoradas pelas camadas de atenção do modelo).
Vamos completar o exemplo anterior com uma máscara de atenção:
{#if fw === 'pt'}
```py no-format
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
attention_mask = [
[1, 1, 1],
[1, 1, 0],
]
outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask))
print(outputs.logits)
```
```python out
tensor([[ 1.5694, -1.3895],
[ 0.5803, -0.4125]], grad_fn=<AddmmBackward>)
```
{:else}
```py no-format
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
attention_mask = [
[1, 1, 1],
[1, 1, 0],
]
outputs = model(tf.constant(batched_ids), attention_mask=tf.constant(attention_mask))
print(outputs.logits)
```
```py out
tf.Tensor(
[[ 1.5693681 -1.3894582 ]
[ 0.5803021 -0.41252586]], shape=(2, 2), dtype=float32)
```
{/if}
Agora obtemos os mesmos logits para a segunda frase do batch.
Observe como o último valor da segunda sequência é um ID de padding, que é um valor 0 na máscara de atenção.
<Tip>
✏️ **Experimente!** Aplique a tokenização manualmente nas duas frases usadas na seção 2 ("I've been waiting for a HuggingFace course my whole life." e "I hate this so much!"). Passe-as através do modelo e verifique se você obtém os mesmos logits que na seção 2. Agora, agrupe-os usando o token de padding e depois crie a máscara de atenção adequada. Verifique que você obtenha os mesmos resultados ao passar pelo modelo!
</Tip>
## Sequências mais longas
Com os modelos Transformer, há um limite para o comprimento das sequências que podemos passar aos modelos. A maioria dos modelos manipula sequências de até 512 ou 1024 tokens, e falhará quando solicitada a processar sequências mais longas. Há duas soluções para este problema:
- Use um modelo com suporte a um comprimento mais longo de sequência.
- Trunque suas sequências.
Os modelos têm diferentes comprimentos de sequência suportados, e alguns são especializados no tratamento de sequências muito longas. O [Longformer](https://huggingface.co/transformers/model_doc/longformer.html) é um exemplo, e outro exemplo é o [LED](https://huggingface.co/transformers/model_doc/led.html). Se você estiver trabalhando em uma tarefa que requer sequências muito longas, recomendamos que você dê uma olhada nesses modelos.
Caso contrário, recomendamos que você trunque suas sequências, especificando o parâmetro `max_sequence_length`:
```py
sequence = sequence[:max_sequence_length]
```
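Alternativamente, supondo que você esteja usando um tokenizer do 🤗 Transformers, também é possível deixar o próprio tokenizer truncar as sequências durante a tokenização:

```py
# O tokenizer trunca automaticamente no comprimento máximo indicado
model_inputs = tokenizer(sequence, truncation=True, max_length=512)
```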
| course/chapters/pt/chapter2/5.mdx/0 | {
"file_path": "course/chapters/pt/chapter2/5.mdx",
"repo_id": "course",
"token_count": 4911
} | 138 |
<FrameworkSwitchCourse {fw} />
# Busca semântica com o FAISS
{#if fw === 'pt'}
<CourseFloatingBanner chapter={5}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/pt/chapter5/section6_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/pt/chapter5/section6_pt.ipynb"},
]} />
{:else}
<CourseFloatingBanner chapter={5}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/pt/chapter5/section6_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/pt/chapter5/section6_tf.ipynb"},
]} />
{/if}
Na [seção 5](/course/chapter5/5), criamos um conjunto de dados de issues e comentários do GitHub do repositório 🤗 Datasets. Nesta seção, usaremos essas informações para construir um mecanismo de pesquisa que pode nos ajudar a encontrar respostas para nossas perguntas mais urgentes sobre a biblioteca!
<Youtube id="OATCgQtNX2o"/>
## Usando embeddings para pesquisa semântica
Como vimos no [Capítulo 1](/course/chapter1), os modelos de linguagem baseados em Transformer representam cada token em um intervalo de texto como um _vetor de incorporação_. Acontece que é possível "agrupar" as incorporações individuais para criar uma representação vetorial para frases inteiras, parágrafos ou (em alguns casos) documentos. Essas incorporações podem ser usadas para encontrar documentos semelhantes no corpus calculando a similaridade do produto escalar (ou alguma outra métrica de similaridade) entre cada incorporação e retornando os documentos com maior sobreposição.
Nesta seção, usaremos embeddings para desenvolver um mecanismo de pesquisa semântica. Esses mecanismos de pesquisa oferecem várias vantagens sobre as abordagens convencionais que se baseiam na correspondência de palavras-chave em uma consulta com os documentos.
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter5/semantic-search.svg" alt="Semantic search."/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter5/semantic-search-dark.svg" alt="Semantic search."/>
</div>
## Carregando e preparando o conjunto de dados
A primeira coisa que precisamos fazer é baixar nosso conjunto de dados de issues do GitHub, então vamos usar a biblioteca 🤗 Hub para resolver a URL onde nosso arquivo está armazenado no Hugging Face Hub:
```py
from huggingface_hub import hf_hub_url
data_files = hf_hub_url(
repo_id="lewtun/github-issues",
filename="datasets-issues-with-comments.jsonl",
repo_type="dataset",
)
```
Com a URL armazenada em `data_files`, podemos carregar o conjunto de dados remoto usando o método apresentado na [seção 2](/course/chapter5/2):
```py
from datasets import load_dataset
issues_dataset = load_dataset("json", data_files=data_files, split="train")
issues_dataset
```
```python out
Dataset({
features: ['url', 'repository_url', 'labels_url', 'comments_url', 'events_url', 'html_url', 'id', 'node_id', 'number', 'title', 'user', 'labels', 'state', 'locked', 'assignee', 'assignees', 'milestone', 'comments', 'created_at', 'updated_at', 'closed_at', 'author_association', 'active_lock_reason', 'pull_request', 'body', 'performed_via_github_app', 'is_pull_request'],
num_rows: 2855
})
```
Aqui nós especificamos a divisão padrão `train` em `load_dataset()`, então ele retorna um `Dataset` em vez de um `DatasetDict`. A primeira ordem de negócios é filtrar os pull request, pois elas tendem a ser raramente usadas para responder a consultas de usuários e introduzirão ruído em nosso mecanismo de pesquisa. Como já deve ser familiar, podemos usar a função `Dataset.filter()` para excluir essas linhas em nosso conjunto de dados. Enquanto estamos nisso, também vamos filtrar as linhas sem comentários, pois elas não fornecem respostas às consultas dos usuários:
```py
issues_dataset = issues_dataset.filter(
lambda x: (x["is_pull_request"] == False and len(x["comments"]) > 0)
)
issues_dataset
```
```python out
Dataset({
features: ['url', 'repository_url', 'labels_url', 'comments_url', 'events_url', 'html_url', 'id', 'node_id', 'number', 'title', 'user', 'labels', 'state', 'locked', 'assignee', 'assignees', 'milestone', 'comments', 'created_at', 'updated_at', 'closed_at', 'author_association', 'active_lock_reason', 'pull_request', 'body', 'performed_via_github_app', 'is_pull_request'],
num_rows: 771
})
```
Podemos ver que há muitas colunas em nosso conjunto de dados, a maioria das quais não precisamos para construir nosso mecanismo de pesquisa. De uma perspectiva de pesquisa, as colunas mais informativas são `title`, `body` e `comments`, enquanto `html_url` nos fornece um link de volta para a issue de origem. Vamos usar a função `Dataset.remove_columns()` para descartar o resto:
```py
columns = issues_dataset.column_names
columns_to_keep = ["title", "body", "html_url", "comments"]
columns_to_remove = set(columns_to_keep).symmetric_difference(columns)
issues_dataset = issues_dataset.remove_columns(columns_to_remove)
issues_dataset
```
```python out
Dataset({
features: ['html_url', 'title', 'comments', 'body'],
num_rows: 771
})
```
Para criar nossos embeddings, aumentaremos cada comentário com o título e o corpo da issue, pois esses campos geralmente incluem informações contextuais úteis. Como nossa coluna `comments` é atualmente uma lista de comentários para cada issue, precisamos "explodir" a coluna para que cada linha consista em uma tupla `(html_url, title, body, comment)`. No Pandas podemos fazer isso com a função [`DataFrame.explode()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.explode.html), que cria uma nova linha para cada elemento em uma coluna semelhante a uma lista, enquanto replica todos os outros valores de coluna. Para ver isso em ação, vamos primeiro mudar para o formato `DataFrame` do Pandas:
```py
issues_dataset.set_format("pandas")
df = issues_dataset[:]
```
Se inspecionarmos a primeira linha neste `DataFrame`, podemos ver que há quatro comentários associados a esta issue:
```py
df["comments"][0].tolist()
```
```python out
['the bug code locate in :\r\n if data_args.task_name is not None:\r\n # Downloading and loading a dataset from the hub.\r\n datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)',
'Hi @jinec,\r\n\r\nFrom time to time we get this kind of `ConnectionError` coming from the github.com website: https://raw.githubusercontent.com\r\n\r\nNormally, it should work if you wait a little and then retry.\r\n\r\nCould you please confirm if the problem persists?',
'cannot connect,even by Web browser,please check that there is some problems。',
'I can access https://raw.githubusercontent.com/huggingface/datasets/1.7.0/datasets/glue/glue.py without problem...']
```
Quando explodimos `df`, esperamos obter uma linha para cada um desses comentários. Vamos verificar se é o caso:
```py
comments_df = df.explode("comments", ignore_index=True)
comments_df.head(4)
```
<table border="1" class="dataframe" style="table-layout: fixed; word-wrap:break-word; width: 100%;">
<thead>
<tr style="text-align: right;">
<th></th>
<th>html_url</th>
<th>title</th>
<th>comments</th>
<th>body</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>https://github.com/huggingface/datasets/issues/2787</td>
<td>ConnectionError: Couldn't reach https://raw.githubusercontent.com</td>
<td>the bug code locate in :\r\n if data_args.task_name is not None...</td>
<td>Hello,\r\nI am trying to run run_glue.py and it gives me this error...</td>
</tr>
<tr>
<th>1</th>
<td>https://github.com/huggingface/datasets/issues/2787</td>
<td>ConnectionError: Couldn't reach https://raw.githubusercontent.com</td>
<td>Hi @jinec,\r\n\r\nFrom time to time we get this kind of `ConnectionError` coming from the github.com website: https://raw.githubusercontent.com...</td>
<td>Hello,\r\nI am trying to run run_glue.py and it gives me this error...</td>
</tr>
<tr>
<th>2</th>
<td>https://github.com/huggingface/datasets/issues/2787</td>
<td>ConnectionError: Couldn't reach https://raw.githubusercontent.com</td>
<td>cannot connect,even by Web browser,please check that there is some problems。</td>
<td>Hello,\r\nI am trying to run run_glue.py and it gives me this error...</td>
</tr>
<tr>
<th>3</th>
<td>https://github.com/huggingface/datasets/issues/2787</td>
<td>ConnectionError: Couldn't reach https://raw.githubusercontent.com</td>
<td>I can access https://raw.githubusercontent.com/huggingface/datasets/1.7.0/datasets/glue/glue.py without problem...</td>
<td>Hello,\r\nI am trying to run run_glue.py and it gives me this error...</td>
</tr>
</tbody>
</table>
Ótimo, podemos ver que as linhas foram replicadas, com a coluna `comments` contendo os comentários individuais! Agora que terminamos com o Pandas, podemos voltar rapidamente para um `Dataset` carregando o `DataFrame` na memória:
```py
from datasets import Dataset
comments_dataset = Dataset.from_pandas(comments_df)
comments_dataset
```
```python out
Dataset({
features: ['html_url', 'title', 'comments', 'body'],
num_rows: 2842
})
```
Ok, isso nos deu alguns milhares de comentários para trabalhar!
<Tip>
✏️ **Experimente!** Veja se você pode usar `Dataset.map()` para explodir a coluna `comments` de `issues_dataset` _sem_ recorrer ao uso de Pandas. Isso é um pouco complicado; você pode achar útil para esta tarefa a seção ["Mapeamento em lote"](https://huggingface.co/docs/datasets/v1.12.1/about_map_batch#batch-mapping) da documentação do 🤗 Dataset.
</Tip>
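Um possível esboço de solução (supondo que `issues_dataset` ainda esteja no formato padrão, ou seja, antes da chamada a `set_format("pandas")`):

```py
def explode_comments(batch):
    # Com batched=True, a função pode devolver mais linhas do que recebeu
    new_rows = {key: [] for key in batch}
    for i, comments in enumerate(batch["comments"]):
        for comment in comments:
            for key in batch:
                new_rows[key].append(comment if key == "comments" else batch[key][i])
    return new_rows


exploded_dataset = issues_dataset.map(explode_comments, batched=True)
```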
Agora que temos um comentário por linha, vamos criar uma nova coluna `comments_length` que contém o número de palavras por comentário:
```py
comments_dataset = comments_dataset.map(
lambda x: {"comment_length": len(x["comments"].split())}
)
```
Podemos usar essa nova coluna para filtrar comentários curtos, que normalmente incluem coisas como "cc @lewtun" ou "Obrigado!" que não são relevantes para o nosso motor de busca. Não há um número preciso para selecionar o filtro, mas cerca de 15 palavras parece um bom começo:
```py
comments_dataset = comments_dataset.filter(lambda x: x["comment_length"] > 15)
comments_dataset
```
```python out
Dataset({
features: ['html_url', 'title', 'comments', 'body', 'comment_length'],
num_rows: 2098
})
```
Depois de limpar um pouco nosso conjunto de dados, vamos concatenar o título, a descrição e os comentários da issue em uma nova coluna `text`. Como de costume, escreveremos uma função simples que podemos passar para `Dataset.map()`:
```py
def concatenate_text(examples):
return {
"text": examples["title"]
+ " \n "
+ examples["body"]
+ " \n "
+ examples["comments"]
}
comments_dataset = comments_dataset.map(concatenate_text)
```
Finalmente estamos prontos para criar alguns embeddings! Vamos dar uma olhada.
## Criando embeddings de texto
Vimos no [Capítulo 2](/course/chapter2) que podemos obter tokens embeddings usando a classe `AutoModel`. Tudo o que precisamos fazer é escolher um checkpoint adequado para carregar o modelo. Felizmente, existe uma biblioteca chamada `sentence-transformers` dedicada à criação de embeddings. Conforme descrito na [documentação da biblioteca](https://www.sbert.net/examples/applications/semantic-search/README.html#symmetric-vs-asymmetric-semantic-search), nosso caso de uso é um exemplo de _asymmetric semantic search_ porque temos uma consulta curta cuja resposta gostaríamos de encontrar em um documento mais longo, como um comentário da issue. A útil [tabela de visão geral do modelo](https://www.sbert.net/docs/pretrained_models.html#model-overview) na documentação indica que o checkpoint `multi-qa-mpnet-base-dot-v1` tem o melhor desempenho para pesquisa semântica, então usaremos isso para nosso aplicativo. Também carregaremos o tokenizer usando o mesmo checkpoint:
{#if fw === 'pt'}
```py
from transformers import AutoTokenizer, AutoModel
model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = AutoModel.from_pretrained(model_ckpt)
```
Para acelerar o processo de embedding, é útil colocar o modelo e as entradas em um dispositivo GPU, então vamos fazer isso agora:
```py
import torch
device = torch.device("cuda")
model.to(device)
```
{:else}
```py
from transformers import AutoTokenizer, TFAutoModel
model_ckpt = "sentence-transformers/multi-qa-mpnet-base-dot-v1"
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
model = TFAutoModel.from_pretrained(model_ckpt, from_pt=True)
```
Observe que definimos `from_pt=True` como um argumento do método `from_pretrained()`. Isso ocorre porque o checkpoint `multi-qa-mpnet-base-dot-v1` só tem pesos PyTorch, portanto, definir `from_pt=True` irá convertê-los automaticamente para o formato TensorFlow para nós. Como você pode ver, é muito simples alternar entre frameworks no 🤗 Transformers!
{/if}
Como mencionamos anteriormente, gostaríamos de representar cada entrada em nosso corpus de issues do GitHub como um único vetor, portanto, precisamos "pool" ou calcular a média de nossas incorporações de token de alguma forma. Uma abordagem popular é realizar *CLS pooling* nas saídas do nosso modelo, onde simplesmente coletamos o último estado oculto para o token especial `[CLS]`. A função a seguir faz o truque para nós:
```py
def cls_pooling(model_output):
return model_output.last_hidden_state[:, 0]
```
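Como alternativa ao CLS pooling, também é comum usar *mean pooling*, tirando a média dos estados ocultos ponderada pela máscara de atenção. Um esboço ilustrativo (em PyTorch):

```py
import torch


def mean_pooling(model_output, attention_mask):
    # Média dos estados ocultos, ignorando as posições de padding
    token_embeddings = model_output.last_hidden_state
    mask = attention_mask.unsqueeze(-1).type_as(token_embeddings)
    return (token_embeddings * mask).sum(1) / mask.sum(1).clamp(min=1e-9)
```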
Em seguida, criaremos uma função auxiliar que tokenizará uma lista de documentos, colocará os tensores na GPU, os alimentará no modelo e, finalmente, aplicará o agrupamento CLS às saídas:
{#if fw === 'pt'}
```py
def get_embeddings(text_list):
encoded_input = tokenizer(
text_list, padding=True, truncation=True, return_tensors="pt"
)
encoded_input = {k: v.to(device) for k, v in encoded_input.items()}
model_output = model(**encoded_input)
return cls_pooling(model_output)
```
Podemos testar o funcionamento da função alimentando-a com a primeira entrada de texto em nosso corpus e inspecionando a forma de saída:
```py
embedding = get_embeddings(comments_dataset["text"][0])
embedding.shape
```
```python out
torch.Size([1, 768])
```
Ótimo, convertemos a primeira entrada em nosso corpus em um vetor de 768 dimensões! Podemos usar `Dataset.map()` para aplicar nossa função `get_embeddings()` a cada linha em nosso corpus, então vamos criar uma nova coluna `embeddings` da seguinte forma:
```py
embeddings_dataset = comments_dataset.map(
lambda x: {"embeddings": get_embeddings(x["text"]).detach().cpu().numpy()[0]}
)
```
{:else}
```py
def get_embeddings(text_list):
encoded_input = tokenizer(
text_list, padding=True, truncation=True, return_tensors="tf"
)
encoded_input = {k: v for k, v in encoded_input.items()}
model_output = model(**encoded_input)
return cls_pooling(model_output)
```
Podemos testar o funcionamento da função alimentando-a com a primeira entrada de texto em nosso corpus e inspecionando a forma de saída:
```py
embedding = get_embeddings(comments_dataset["text"][0])
embedding.shape
```
```python out
TensorShape([1, 768])
```
Ótimo, convertemos a primeira entrada em nosso corpus em um vetor de 768 dimensões! Podemos usar `Dataset.map()` para aplicar nossa função `get_embeddings()` a cada linha em nosso corpus, então vamos criar uma nova coluna `embeddings` da seguinte forma:
```py
embeddings_dataset = comments_dataset.map(
lambda x: {"embeddings": get_embeddings(x["text"]).numpy()[0]}
)
```
{/if}
Observe que convertemos os embeddings em arrays NumPy -- isso porque 🤗 Datasets requer esse formato quando tentamos indexá-los com FAISS, o que faremos a seguir.
## Usando FAISS para busca de similaridade
Agora que temos um conjunto de dados de embeddings, precisamos de alguma maneira de pesquisá-los. Para fazer isso, usaremos uma estrutura de dados especial em 🤗 Datasets chamada _FAISS index_. [FAISS](https://faiss.ai/) (abreviação de Facebook AI Similarity Search) é uma biblioteca que fornece algoritmos eficientes para pesquisar rapidamente e agrupar vetores de incorporação.
A idéia básica por trás do FAISS é criar uma estrutura de dados especial chamada _index_ que permite descobrir quais embeddings são semelhantes a um embedding de entrada. Criar um índice FAISS em 🤗 Datasets é simples -- usamos a função `Dataset.add_faiss_index()` e especificamos qual coluna do nosso conjunto de dados gostaríamos de indexar:
```py
embeddings_dataset.add_faiss_index(column="embeddings")
```
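Se você quiser reutilizar o índice mais tarde sem recalculá-lo, também é possível salvá-lo em disco e recarregá-lo depois (o nome do arquivo abaixo é apenas um exemplo):

```py
# Salva o índice FAISS em disco e o recarrega em seguida
embeddings_dataset.save_faiss_index("embeddings", "my_index.faiss")
embeddings_dataset.load_faiss_index("embeddings", "my_index.faiss")
```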
Agora podemos realizar consultas neste índice fazendo uma pesquisa do vizinho mais próximo com a função `Dataset.get_nearest_examples()`. Vamos testar isso primeiro incorporando uma pergunta da seguinte forma:
{#if fw === 'pt'}
```py
question = "How can I load a dataset offline?"
question_embedding = get_embeddings([question]).cpu().detach().numpy()
question_embedding.shape
```
```python out
torch.Size([1, 768])
```
{:else}
```py
question = "How can I load a dataset offline?"
question_embedding = get_embeddings([question]).numpy()
question_embedding.shape
```
```python out
(1, 768)
```
{/if}
Assim como com os documentos, agora temos um vetor de 768 dimensões representando a consulta, que podemos comparar com todo o corpus para encontrar os embeddings mais semelhantes:
```py
scores, samples = embeddings_dataset.get_nearest_examples(
"embeddings", question_embedding, k=5
)
```
A função `Dataset.get_nearest_examples()` retorna uma tupla de pontuações que classificam a sobreposição entre a consulta e o documento e um conjunto correspondente de amostras (aqui, as 5 melhores correspondências). Vamos coletá-los em um `pandas.DataFrame` para que possamos classificá-los facilmente:
```py
import pandas as pd
samples_df = pd.DataFrame.from_dict(samples)
samples_df["scores"] = scores
samples_df.sort_values("scores", ascending=False, inplace=True)
```
Agora podemos iterar nas primeiras linhas para ver como nossa consulta correspondeu aos comentários disponíveis:
```py
for _, row in samples_df.iterrows():
print(f"COMMENT: {row.comments}")
print(f"SCORE: {row.scores}")
print(f"TITLE: {row.title}")
print(f"URL: {row.html_url}")
print("=" * 50)
print()
```
```python out
"""
COMMENT: Requiring online connection is a deal breaker in some cases unfortunately so it'd be great if offline mode is added similar to how `transformers` loads models offline fine.
@mandubian's second bullet point suggests that there's a workaround allowing you to use your offline (custom?) dataset with `datasets`. Could you please elaborate on how that should look like?
SCORE: 25.505046844482422
TITLE: Discussion using datasets in offline mode
URL: https://github.com/huggingface/datasets/issues/824
==================================================
COMMENT: The local dataset builders (csv, text , json and pandas) are now part of the `datasets` package since #1726 :)
You can now use them offline
\`\`\`python
datasets = load_dataset("text", data_files=data_files)
\`\`\`
We'll do a new release soon
SCORE: 24.555509567260742
TITLE: Discussion using datasets in offline mode
URL: https://github.com/huggingface/datasets/issues/824
==================================================
COMMENT: I opened a PR that allows to reload modules that have already been loaded once even if there's no internet.
Let me know if you know other ways that can make the offline mode experience better. I'd be happy to add them :)
I already note the "freeze" modules option, to prevent local modules updates. It would be a cool feature.
----------
> @mandubian's second bullet point suggests that there's a workaround allowing you to use your offline (custom?) dataset with `datasets`. Could you please elaborate on how that should look like?
Indeed `load_dataset` allows to load remote dataset script (squad, glue, etc.) but also you own local ones.
For example if you have a dataset script at `./my_dataset/my_dataset.py` then you can do
\`\`\`python
load_dataset("./my_dataset")
\`\`\`
and the dataset script will generate your dataset once and for all.
----------
About I'm looking into having `csv`, `json`, `text`, `pandas` dataset builders already included in the `datasets` package, so that they are available offline by default, as opposed to the other datasets that require the script to be downloaded.
cf #1724
SCORE: 24.14896583557129
TITLE: Discussion using datasets in offline mode
URL: https://github.com/huggingface/datasets/issues/824
==================================================
COMMENT: > here is my way to load a dataset offline, but it **requires** an online machine
>
> 1. (online machine)
>
> ```
>
> import datasets
>
> data = datasets.load_dataset(...)
>
> data.save_to_disk(/YOUR/DATASET/DIR)
>
> ```
>
> 2. copy the dir from online to the offline machine
>
> 3. (offline machine)
>
> ```
>
> import datasets
>
> data = datasets.load_from_disk(/SAVED/DATA/DIR)
>
> ```
>
>
>
> HTH.
SCORE: 22.893993377685547
TITLE: Discussion using datasets in offline mode
URL: https://github.com/huggingface/datasets/issues/824
==================================================
COMMENT: here is my way to load a dataset offline, but it **requires** an online machine
1. (online machine)
\`\`\`
import datasets
data = datasets.load_dataset(...)
data.save_to_disk(/YOUR/DATASET/DIR)
\`\`\`
2. copy the dir from online to the offline machine
3. (offline machine)
\`\`\`
import datasets
data = datasets.load_from_disk(/SAVED/DATA/DIR)
\`\`\`
HTH.
SCORE: 22.406635284423828
TITLE: Discussion using datasets in offline mode
URL: https://github.com/huggingface/datasets/issues/824
==================================================
"""
```
Nada mal! Nosso segundo resultado parece corresponder à consulta.
<Tip>
✏️ **Experimente!** Crie sua própria consulta e veja se consegue encontrar uma resposta nos documentos recuperados. Você pode ter que aumentar o parâmetro `k` em `Dataset.get_nearest_examples()` para ampliar a pesquisa.
</Tip> | course/chapters/pt/chapter5/6.mdx/0 | {
"file_path": "course/chapters/pt/chapter5/6.mdx",
"repo_id": "course",
"token_count": 8460
} | 139 |
# Обработка естественного языка
<CourseFloatingBanner
chapter={1}
classNames="absolute z-10 right-0 top-0"
/>
Прежде, чем перейти к трансформерам, сделаем быстрый обзор того, что такое обработка естественного языка (NLP), и почему мы заинтересованы в этой сфере.
## Что такое NLP?
NLP - область лингвистики и машинного обучения, которая изучает все, что связано с естественными языками. Главная цель NLP не просто понимать отдельные слова, но и иметь возможность понимать контекст, в котором эти слова находятся.
Список типичных NLP-задач с некоторыми примерами:
- **Классификация предложений**: определить эмоциональную окраску отзыва, детектировать среди входящих писем спам, определить грамматическую правильность предложения или даже проверить, являются ли два предложения связанными между собой логически
- **Классификация каждого слова в предложении**: вычленить грамматические составляющие предложения (существительное, глагол, прилагательное) или определить именованные сущности (персона, локация, организация)
- **Генерация текста**: закончить предложение на основе некоторого запроса, заполнить пропуски в тексте, содержащем замаскированные слова
- **Сформулировать ответ на вопрос**: получить ответ на заданный по тексту вопрос
- **Сгенерировать новое предложение исходя из предложенного**: перевести текст с одного языка на другой, выполнить автоматическое реферирование текста
NLP не ограничивается только письменным текстом. Есть множество сложных задач, связанных с распознаванием речи и компьютерным зрением, таких как транскрибирование аудио или описание изображений.
## Почему это сложно?
Компьютеры не обрабатывают информацию так же, как люди. Например, когда мы читаем предложение «Я голоден», мы можем легко понять его значение. Точно так же, имея два предложения, такие как «Я голоден» и «Мне грустно», мы можем легко определить, насколько они похожи. Для моделей машинного обучения (ML) такие задачи сложнее. Текст должен быть обработан так, чтобы модель могла учиться на нем. А поскольку язык сложен, нам нужно тщательно продумать, как должна выполняться эта обработка. Было проведено много исследований того, как представлять текст, и мы рассмотрим некоторые методы в следующей главе.
| course/chapters/ru/chapter1/2.mdx/0 | {
"file_path": "course/chapters/ru/chapter1/2.mdx",
"repo_id": "course",
"token_count": 2304
} | 140 |
<FrameworkSwitchCourse {fw} />
# Введение
<CourseFloatingBanner
chapter={3}
classNames="absolute z-10 right-0 top-0"
/>
В [главе 2](../chapter2/1) мы увидели, как можно использовать токенизаторы и предобученные модели для построения предсказаний. Но что если мы хотим дообучить предобученную модель на собственном датасете? Это и есть тема данной главы! Мы изучим:
{#if fw === 'pt'}
* Как подготовить большой датасет из Model Hub
* Как использовать высокоуровненое API для дообучения модели
* Как использовать собственный цикл обучения (training loop)
* Как использовать библиотеку 🤗 Accelerate для запуска собственного цикла обучения на распределенной вычислительной структуре
{:else}
* Как подготовить большой датасет из Model Hub
* Как использовать Keras для дообучения модели
* Как использовать Keras для получения предсказаний
* Как использовать собственную метрику
{/if}
Чтобы загрузить свои чекпоинты на Hugging Face Hub, необходимо иметь учетную запись: [создать аккаунт](https://huggingface.co/join) | course/chapters/ru/chapter3/1.mdx/0 | {
"file_path": "course/chapters/ru/chapter3/1.mdx",
"repo_id": "course",
"token_count": 911
} | 141 |
# Big data? 🤗 Datasets спешат на помощь!
<CourseFloatingBanner chapter={5}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter5/section4.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter5/section4.ipynb"},
]} />
В настоящее время нередко приходится работать с многогигабайтными наборами данных, особенно если вы планируете предварительно обучить трансформер, такой как BERT или GPT-2, с нуля. В этих случаях даже _загрузка_ данных может стать проблемой. Например, корпус WebText, используемый для предобучения GPT-2, состоит из более чем 8 миллионов документов и 40 ГБ текста — загрузка этого в оперативную память вашего ноутбука может привести к сердечному приступу!
К счастью, 🤗 Datasets спроектирована так, что позволит избежать таких ограничений. Библиотека избавляет вас от необходимости управлять памятью и рассматривает датасеты как [файлы, отображаемые в память](https://habr.com/ru/post/55716/) (memory-mapped files, MMF), также обходит ограничения жестких дисков путем формирования потоков записей из корпуса текстов.
<Youtube id="JwISwTCPPWo"/>
В этом разделе мы рассмотрим эти особенности 🤗 Datasets с огромным корпусом объемом 825 ГБ, известным как [Pile] (https://pile.eleuther.ai). Давайте начнем!
## Что такое the Pile?
The Pile — это корпус текстов на английском языке, созданный [EleutherAI](https://www.eleuther.ai) для обучения крупномасштабных языковых моделей. Он включает в себя широкий спектр наборов данных, включая научные статьи, репозитории кода GitHub и отфильтрованный веб-текст. Учебный корпус доступен в виде [фрагментов по 14 ГБ](https://mystic.the-eye.eu/public/AI/pile/), и вы также можете загрузить несколько [отдельных компонентов](https://mystic.the-eye.eu/public/AI/pile_preliminary_components/). Начнем с набора данных PubMed Abstracts, который представляет собой свод аннотаций из 15 миллионов биомедицинских публикаций в [PubMed](https://pubmed.ncbi.nlm.nih.gov/). Набор данных находится в [формате JSON Lines](https://jsonlines.org) и сжат с использованием библиотеки `zstandard`, поэтому сначала нам нужно установить библиотеку `zstandard`:
```py
!pip install zstandard
```
Затем мы можем загрузить набор данных, используя метод для подгрузки файлов, который мы изучили в [разделе 2](../chapter5/2):
```py
from datasets import load_dataset
# Это займет несколько минут, пока ожидаете – сделайте себе кофе или чай :)
data_files = "https://mystic.the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst"
pubmed_dataset = load_dataset("json", data_files=data_files, split="train")
pubmed_dataset
```
```python out
Dataset({
features: ['meta', 'text'],
num_rows: 15518009
})
```
Мы видим, что в нашем наборе данных 15 518 009 строк и 2 столбца — это очень много!
<Tip>
✎ По умолчанию 🤗 Datasets распаковывает файлы, необходимые для загрузки набора данных. Если вы хотите сохранить место на жестком диске, вы можете передать `DownloadConfig(delete_extracted=True)` в аргумент `download_config` функции `load_dataset()`. Дополнительные сведения см. в [документации](https://huggingface.co/docs/datasets/package_reference/builder_classes#datasets.DownloadConfig).
</Tip>
Давайте посмотрим на содержимое первого экземпляра:
```py
pubmed_dataset[0]
```
```python out
{'meta': {'pmid': 11409574, 'language': 'eng'},
'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection.\nTo determine the prevalence of hypoxaemia in children aged under 5 years suffering acute lower respiratory infections (ALRI), the risk factors for hypoxaemia in children under 5 years of age with ALRI, and the association of hypoxaemia with an increased risk of dying in children of the same age ...'}
```
Отлично, выглядит как аннотация медицинской статьи. Теперь давайте посмотрим объем памяти, который мы использовали при загрузке данных:
## Магия отображения в память
Простой способ измерить использование памяти в Python — использовать библиотеку [`psutil`](https://psutil.readthedocs.io/en/latest/), которую можно установить с помощью `pip` следующим образом:
```python
!pip install psutil
```
Она предоставляет класс `Process`, который позволяет нам проверить использование памяти текущим процессом следующим образом:
```py
import psutil
# Process.memory_info возвращает объем в байтах, мы пересчитаем в мегабайты
print(f"RAM used: {psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB")
```
```python out
RAM used: 5678.33 MB
```
Здесь атрибут `rss` относится к _резидентному размеру набора_, который представляет собой долю памяти, которую процесс занимает в ОЗУ. Это измерение также включает память, используемую интерпретатором Python и загруженными нами библиотеками, поэтому фактический объем памяти, используемый для загрузки набора данных, немного меньше. Для сравнения давайте посмотрим, насколько велик набор данных на диске, используя атрибут `dataset_size`. Поскольку результат, как и раньше, выражается в байтах, нам нужно вручную преобразовать его в гигабайты:
```py
print(f"Number of files in dataset : {pubmed_dataset.dataset_size}")
size_gb = pubmed_dataset.dataset_size / (1024**3)
print(f"Dataset size (cache file) : {size_gb:.2f} GB")
```
```python out
Number of files in dataset : 20979437051
Dataset size (cache file) : 19.54 GB
```
Приятно — несмотря на то, что он весит почти 20 ГБ, мы можем загрузить и получить доступ к набору данных с гораздо меньшим объемом оперативной памяти!
<Tip>
✏️ **Попробуйте!** Выберите один из [компонентов](https://mystic.the-eye.eu/public/AI/pile_preliminary_components/) из Pile, который больше, чем оперативная память вашего ноутбука или настольного компьютера, загрузите его с 🤗 Datasets и измерьте объем используемой оперативной памяти. Обратите внимание, что для получения точных измерений вам потребуется сделать это в новом процессе. Вы можете найти распакованные размеры каждого компонента в Таблице 1 [документации Pile](https://arxiv.org/abs/2101.00027). Небольшой набросок такого замера приведен сразу после этой заметки.
</Tip>
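Ниже приведен минимальный набросок такого замера в пределах текущего процесса (это лишь приблизительная оценка; URL компонента здесь взят из примера выше — подставьте выбранный вами компонент):
```py
import psutil
from datasets import load_dataset
def rss_mb():
    # Текущий резидентный размер процесса в мегабайтах
    return psutil.Process().memory_info().rss / (1024 * 1024)
before = rss_mb()
# Подставьте URL выбранного вами компонента Pile
data_files = "https://mystic.the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst"
component_dataset = load_dataset("json", data_files=data_files, split="train")
after = rss_mb()
print(f"Прирост RAM после загрузки: {after - before:.2f} MB")
```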
Если вы знакомы с Pandas, этот результат может стать неожиданностью из-за знаменитого [эмпирического правила](https://wesmckinney.com/blog/apache-arrow-pandas-internals/) Уэса Маккинни, согласно которому вам обычно требуется от 5 до 10 раз больше оперативной памяти, чем размер вашего набора данных. Так как же 🤗 Datasets решают эту проблему управления памятью? 🤗 Datasets рассматривают каждый набор данных как [файл с отображением в память](https://en.wikipedia.org/wiki/Memory-mapped_file), который обеспечивает сопоставление между оперативной памятью и хранилищем файловой системы, что позволяет библиотеке получать доступ к элементам и работать с ними без необходимости полной загрузки его в память.
Memory-mapped файлы также могут совместно использоваться несколькими процессами, что позволяет распараллеливать такие методы, как `Dataset.map()`, без необходимости перемещать или копировать набор данных. Под капотом все эти возможности реализованы в формате [Apache Arrow](https://arrow.apache.org) и библиотеке [`pyarrow`](https://arrow.apache.org/docs/python/index.html), которые делают загрузку и обработку данных молниеносной. (Для получения более подробной информации об Apache Arrow и сравнении с Pandas ознакомьтесь с [публикацией в блоге Деяна Симика](https://towardsdatascience.com/apache-arrow-read-dataframe-with-zero-memory-69634092b1a).) Чтобы увидеть это в действии, давайте проведем небольшой тест скорости, перебирая все элементы в наборе данных PubMed Abstracts:
```py
import timeit
code_snippet = """batch_size = 1000
for idx in range(0, len(pubmed_dataset), batch_size):
_ = pubmed_dataset[idx:idx + batch_size]
"""
time = timeit.timeit(stmt=code_snippet, number=1, globals=globals())
print(
f"Iterated over {len(pubmed_dataset)} examples (about {size_gb:.1f} GB) in "
f"{time:.1f}s, i.e. {size_gb/time:.3f} GB/s"
)
```
```python out
'Iterated over 15518009 examples (about 19.5 GB) in 64.2s, i.e. 0.304 GB/s'
```
Здесь мы использовали модуль `timeit` Python для измерения времени выполнения `code_snippet`. Обычно вы сможете перебирать набор данных со скоростью от нескольких десятых долей ГБ/с до нескольких ГБ/с. Это прекрасно работает для подавляющего большинства приложений, но иногда вам придется работать с набором данных, который слишком велик даже для хранения на жестком диске вашего ноутбука. Например, если бы мы попытались загрузить весь Pile, нам потребовалось бы 825 ГБ свободного места на диске! Чтобы справиться с такими случаями, 🤗 Datasets предоставляют функцию потоковой передачи, которая позволяет нам загружать и получать доступ к элементам на лету, без необходимости загружать весь набор данных. Давайте посмотрим, как это работает.
<Tip>
💡 В Jupyter notebooks вы также можете измерить время исполнения ячейки с помощью магической команды [`%%timeit`](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-timeit).
</Tip>
## Потоковая передача датасета
Чтобы включить потоковую передачу набора данных, вам просто нужно передать аргумент `streaming=True` в функцию `load_dataset()`. Например, давайте снова загрузим набор данных PubMed Abstracts, но в потоковом режиме:
```py
pubmed_dataset_streamed = load_dataset(
"json", data_files=data_files, split="train", streaming=True
)
```
Вместо знакомого `Dataset`, с которым мы уже встречались в других местах этой главы, объект, возвращаемый с `streaming=True`, является `IterableDataset`. Как следует из названия, чтобы получить доступ к элементам `IterableDataset`, нам нужно выполнить итерацию по нему. Мы можем получить доступ к первому элементу нашего набора потоковых данных следующим образом:
```py
next(iter(pubmed_dataset_streamed))
```
```python out
{'meta': {'pmid': 11409574, 'language': 'eng'},
'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection.\nTo determine the prevalence of hypoxaemia in children aged under 5 years suffering acute lower respiratory infections (ALRI), the risk factors for hypoxaemia in children under 5 years of age with ALRI, and the association of hypoxaemia with an increased risk of dying in children of the same age ...'}
```
Элементы из потокового набора данных можно обрабатывать на лету с помощью `IterableDataset.map()`, что полезно во время обучения, если вам нужно токенизировать входные данные. Процесс точно такой же, как тот, который мы использовали для токенизации нашего набора данных в [Главе 3](../chapter3/1), с той лишь разницей, что выходные данные возвращаются один за другим:
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
tokenized_dataset = pubmed_dataset_streamed.map(lambda x: tokenizer(x["text"]))
next(iter(tokenized_dataset))
```
```python out
{'input_ids': [101, 4958, 5178, 4328, 6779, ...], 'attention_mask': [1, 1, 1, 1, 1, ...]}
```
<Tip>
💡 Чтобы ускорить токенизацию с потоковой передачей, вы можете передать `batched=True`, как мы делали в последнем разделе. Он будет обрабатывать примеры батчами; размер батча по умолчанию составляет 1000 и может быть указан в аргументе `batch_size`.
</Tip>
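Например, набросок токенизации батчами может выглядеть так (значение `batch_size` здесь выбрано произвольно):
```py
tokenized_dataset = pubmed_dataset_streamed.map(
    lambda batch: tokenizer(batch["text"]),
    batched=True,
    batch_size=1000,
)
next(iter(tokenized_dataset))
```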
Вы также можете перемешать потоковые наборы данных, используя `IterableDataset.shuffle()`, но в отличие от `Dataset.shuffle()`, это только перемешивает элементы в предопределенном `buffer_size`:
```py
shuffled_dataset = pubmed_dataset_streamed.shuffle(buffer_size=10_000, seed=42)
next(iter(shuffled_dataset))
```
```python out
{'meta': {'pmid': 11410799, 'language': 'eng'},
'text': 'Randomized study of dose or schedule modification of granulocyte colony-stimulating factor in platinum-based chemotherapy for elderly patients with lung cancer ...'}
```
В этом примере мы выбрали случайный пример из первых 10 000 примеров в буфере. После обращения к примеру его место в буфере заполняется следующим примером в корпусе (т. е. 10 001-м примером в приведенном выше случае). Вы также можете выбирать элементы из потокового набора данных, используя функции `IterableDataset.take()` и `IterableDataset.skip()`, которые действуют аналогично `Dataset.select()`. Например, чтобы выбрать первые 5 примеров в наборе данных PubMed Abstracts, мы можем сделать следующее:
```py
dataset_head = pubmed_dataset_streamed.take(5)
list(dataset_head)
```
```python out
[{'meta': {'pmid': 11409574, 'language': 'eng'},
'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection ...'},
{'meta': {'pmid': 11409575, 'language': 'eng'},
'text': 'Clinical signs of hypoxaemia in children with acute lower respiratory infection: indicators of oxygen therapy ...'},
{'meta': {'pmid': 11409576, 'language': 'eng'},
'text': "Hypoxaemia in children with severe pneumonia in Papua New Guinea ..."},
{'meta': {'pmid': 11409577, 'language': 'eng'},
'text': 'Oxygen concentrators and cylinders ...'},
{'meta': {'pmid': 11409578, 'language': 'eng'},
'text': 'Oxygen supply in rural africa: a personal experience ...'}]
```
Точно так же вы можете использовать функцию `IterableDataset.skip()` для создания обучающих и проверочных сплитов из перемешанного набора данных следующим образом:
```py
# Пропустить первые 1000 объектов и включить остальные в обучающую выборку
train_dataset = shuffled_dataset.skip(1000)
# Взять первые 1000 объектов в валидационную выборку
validation_dataset = shuffled_dataset.take(1000)
```
Давайте завершим наше исследование потоковой передачи наборов данных общим приложением: объединение нескольких наборов данных вместе для создания единого корпуса. 🤗 Datasets предоставляют функцию `interleave_datasets()`, которая преобразует список объектов `IterableDataset` в один `IterableDataset`, где элементы нового набора данных получаются путем чередования исходных примеров. Эта функция особенно полезна, когда вы пытаетесь объединить большие наборы данных, поэтому в качестве примера давайте воспроизведем компонент FreeLaw из Pile, который представляет собой набор данных юридических заключений судов США объемом 51 ГБ:
```py
law_dataset_streamed = load_dataset(
"json",
data_files="https://mystic.the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst",
split="train",
streaming=True,
)
next(iter(law_dataset_streamed))
```
```python out
{'meta': {'case_ID': '110921.json',
'case_jurisdiction': 'scotus.tar.gz',
'date_created': '2010-04-28T17:12:49Z'},
'text': '\n461 U.S. 238 (1983)\nOLIM ET AL.\nv.\nWAKINEKONA\nNo. 81-1581.\nSupreme Court of United States.\nArgued January 19, 1983.\nDecided April 26, 1983.\nCERTIORARI TO THE UNITED STATES COURT OF APPEALS FOR THE NINTH CIRCUIT\n*239 Michael A. Lilly, First Deputy Attorney General of Hawaii, argued the cause for petitioners. With him on the brief was James H. Dannenberg, Deputy Attorney General...'}
```
Этот набор данных достаточно велик, чтобы нагружать оперативную память большинства ноутбуков, но мы смогли загрузить его и получить к нему доступ! Давайте теперь объединим примеры из наборов данных FreeLaw и PubMed Abstracts с функцией `interleave_datasets()`:
```py
from itertools import islice
from datasets import interleave_datasets
combined_dataset = interleave_datasets([pubmed_dataset_streamed, law_dataset_streamed])
list(islice(combined_dataset, 2))
```
```python out
[{'meta': {'pmid': 11409574, 'language': 'eng'},
'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection ...'},
{'meta': {'case_ID': '110921.json',
'case_jurisdiction': 'scotus.tar.gz',
'date_created': '2010-04-28T17:12:49Z'},
'text': '\n461 U.S. 238 (1983)\nOLIM ET AL.\nv.\nWAKINEKONA\nNo. 81-1581.\nSupreme Court of United States.\nArgued January 19, 1983.\nDecided April 26, 1983.\nCERTIORARI TO THE UNITED STATES COURT OF APPEALS FOR THE NINTH CIRCUIT\n*239 Michael A. Lilly, First Deputy Attorney General of Hawaii, argued the cause for petitioners. With him on the brief was James H. Dannenberg, Deputy Attorney General...'}]
```
Здесь мы использовали функцию `islice()` из модуля `itertools` Python, чтобы выбрать первые два объекта из объединенного набора данных, и мы видим, что они соответствуют первым примерам из каждого из двух исходных наборов данных.
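По умолчанию примеры чередуются по очереди, однако `interleave_datasets()` также принимает аргумент `probabilities`, позволяющий задать долю примеров из каждого источника. Примерный набросок (значения вероятностей ниже выбраны произвольно, только для иллюстрации):
```py
from itertools import islice
from datasets import interleave_datasets
# Примерно 80% примеров берем из PubMed Abstracts и 20% из FreeLaw
weighted_dataset = interleave_datasets(
    [pubmed_dataset_streamed, law_dataset_streamed],
    probabilities=[0.8, 0.2],
    seed=42,
)
list(islice(weighted_dataset, 2))
```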
Наконец, если вы хотите получить в потоковом режиме весь Pile целиком (825 ГБ), вы можете получить все подготовленные файлы следующим образом:
```py
base_url = "https://mystic.the-eye.eu/public/AI/pile/"
data_files = {
"train": [base_url + "train/" + f"{idx:02d}.jsonl.zst" for idx in range(30)],
"validation": base_url + "val.jsonl.zst",
"test": base_url + "test.jsonl.zst",
}
pile_dataset = load_dataset("json", data_files=data_files, streaming=True)
next(iter(pile_dataset["train"]))
```
```python out
{'meta': {'pile_set_name': 'Pile-CC'},
'text': 'It is done, and submitted. You can play “Survival of the Tastiest” on Android, and on the web...'}
```
<Tip>
✏️ **Попробуйте!** Используйте один из больших корпусов Common Crawl, например [`mc4`](https://huggingface.co/datasets/mc4) или [`oscar`](https://huggingface.co/datasets/oscar), чтобы создать потоковый многоязычный набор данных, который отражает пропорции разговорных языков в стране по вашему выбору. Например, в Швейцарии четыре национальных языка: немецкий, французский, итальянский и ретороманский, поэтому вы можете попробовать создать швейцарский корпус, выбрав подмножества OSCAR пропорционально доле говорящих на каждом из этих языков.
</Tip>
Теперь у вас есть все инструменты, необходимые для загрузки и обработки наборов данных всех форм и размеров, но, если только вам не повезет, в вашем путешествии по НЛП наступит момент, когда вам придется фактически создать собственный набор данных для решения проблемы. Это тема следующего раздела!
| course/chapters/ru/chapter5/4.mdx/0 | {
"file_path": "course/chapters/ru/chapter5/4.mdx",
"repo_id": "course",
"token_count": 13225
} | 142 |
<FrameworkSwitchCourse {fw} />
# Классификация токенов[[token-classification]]
{#if fw === 'pt'}
<CourseFloatingBanner chapter={7}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter7/section2_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter7/section2_pt.ipynb"},
]} />
{:else}
<CourseFloatingBanner chapter={7}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter7/section2_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter7/section2_tf.ipynb"},
]} />
{/if}
Первое приложение, которое мы рассмотрим, - это классификация токенов. Эта общая задача охватывает любую проблему, которую можно сформулировать как "присвоение метки каждому токену в предложении", например:
- **Распознавание именованных сущностей (Named entity recognition - NER)**: Поиск сущностей (например, лиц, мест или организаций) в предложении. Это можно сформулировать как приписывание метки каждому токену, имея один класс для сущности и один класс для "нет сущности".
- **Морфологическая разметка (Part-of-speech tagging - POS)**: Пометить каждое слово в предложении как соответствующее определенной части речи (например, существительное, глагол, прилагательное и т. д.).
- **Выделение токенов (Chunking)**: Поиск токенов, принадлежащих одной и той же сущности. Эта задача (которая может быть объединена с POS или NER) может быть сформулирована как присвоение одной метки (обычно `B-`) всем токенам, которые находятся в начале фрагмента текста, другой метки (обычно `I-`) - токенам, которые находятся внутри фрагмента текста, и третьей метки (обычно `O`) - токенам, которые не принадлежат ни к одному фрагменту.
<Youtube id="wVHdVlPScxA"/>
Конечно, существует множество других типов задач классификации токенов; это лишь несколько показательных примеров. В этом разделе мы дообучим модель (BERT) для задачи NER, которая затем сможет вычислять прогнозы, подобные этому:
<iframe src="https://course-demos-bert-finetuned-ner.hf.space" frameBorder="0" height="350" title="Gradio app" class="block dark:hidden container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<a class="flex justify-center" href="/huggingface-course/bert-finetuned-ner">
<img class="block dark:hidden lg:w-3/5" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter7/model-eval-bert-finetuned-ner.png" alt="One-hot encoded labels for question answering."/>
<img class="hidden dark:block lg:w-3/5" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter7/model-eval-bert-finetuned-ner-dark.png" alt="One-hot encoded labels for question answering."/>
</a>
Вы можете найти модель, которую мы обучим и загрузим на хаб, и перепроверить ее предсказания [здесь](https://huggingface.co/huggingface-course/bert-finetuned-ner?text=My+name+is+Sylvain+and+I+work+at+Hugging+Face+in+Brooklyn).
## Подготовка данных[[preparing-the-data]]
Прежде всего, нам нужен набор данных, подходящий для классификации токенов. В этом разделе мы будем использовать [набор данных CoNLL-2003](https://huggingface.co/datasets/conll2003), который содержит новости от Reuters.
<Tip>
💡 Если ваш набор данных состоит из текстов, часть которых состоит из слов с соответствующими метками, вы сможете адаптировать описанные здесь процедуры обработки данных к своему набору данных. Обратитесь к [Главе 5](../chapter5/1), если вам нужно освежить в памяти то, как загружать собственные данные в `Dataset`.
</Tip>
### Датасет CoNLL-2003[[the-conll-2003-dataset]]
Для загрузки датасета CoNLL-2003 мы используем метод `load_dataset()` из библиотеки 🤗 Datasets:
```py
from datasets import load_dataset
raw_datasets = load_dataset("conll2003")
```
Это позволит загрузить и кэшировать датасет, как мы видели в [Главе 3](../chapter3/1) для датасета GLUE MRPC. Изучение этого объекта показывает нам присутствующие столбцы и части тренировочного, проверочного и тестового наборов:
```py
raw_datasets
```
```python out
DatasetDict({
train: Dataset({
features: ['chunk_tags', 'id', 'ner_tags', 'pos_tags', 'tokens'],
num_rows: 14041
})
validation: Dataset({
features: ['chunk_tags', 'id', 'ner_tags', 'pos_tags', 'tokens'],
num_rows: 3250
})
test: Dataset({
features: ['chunk_tags', 'id', 'ner_tags', 'pos_tags', 'tokens'],
num_rows: 3453
})
})
```
В частности, мы видим, что датасет содержит метки для трех задач, о которых мы говорили ранее: NER, POS и chunking. Существенным отличием от других датасетов является то, что входные тексты представлены не как предложения или документы, а как списки слов (последний столбец называется `tokens`, но он содержит слова в том смысле, что это предварительно токенизированные входные данные, которые еще должны пройти через токенизатор для токенизации по подсловам).
Давайте посмотрим на первый элемент обучающего набора:
```py
raw_datasets["train"][0]["tokens"]
```
```python out
['EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'lamb', '.']
```
Поскольку мы хотим выполнить распознавание именованных сущностей, мы изучим теги NER:
```py
raw_datasets["train"][0]["ner_tags"]
```
```python out
[3, 0, 7, 0, 0, 0, 7, 0, 0]
```
Это метки в виде целых чисел, готовые для обучения, но они не всегда полезны, когда мы хотим проанализировать данные. Как и в случае с классификацией текста, мы можем получить доступ к соответствию между этими целыми числами и названиями меток, посмотрев на атрибут `features` нашего датасета:
```py
ner_feature = raw_datasets["train"].features["ner_tags"]
ner_feature
```
```python out
Sequence(feature=ClassLabel(num_classes=9, names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC'], names_file=None, id=None), length=-1, id=None)
```
Таким образом, этот столбец содержит элементы, которые являются последовательностями `ClassLabel`. Тип элементов последовательности указан в атрибуте `feature` этого `ner_feature`, и мы можем получить доступ к списку имен, посмотрев на атрибут `names` этого `feature`:
```py
label_names = ner_feature.feature.names
label_names
```
```python out
['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC', 'B-MISC', 'I-MISC']
```
Мы уже видели эти метки при изучении конвейера `token-classification` в [Главе 6](../chapter6/3), но для краткости напомним:
- `O` означает, что слово не соответствует какой-либо сущности.
- `B-PER`/`I-PER` означает, что слово соответствует началу/находится внутри сущности персоны *person*.
- `B-ORG`/`I-ORG` означает, что слово соответствует началу/находится внутри сущности *organization*.
- `B-LOC`/`I-LOC` означает, что слово соответствует началу/находится внутри сущности *location*.
- `B-MISC`/`I-MISC` означает, что слово соответствует началу/находится внутри сущности *miscellaneous*.
Теперь декодирование меток, которые мы видели ранее, дает нам следующее:
```python
words = raw_datasets["train"][0]["tokens"]
labels = raw_datasets["train"][0]["ner_tags"]
line1 = ""
line2 = ""
for word, label in zip(words, labels):
full_label = label_names[label]
max_length = max(len(word), len(full_label))
line1 += word + " " * (max_length - len(word) + 1)
line2 += full_label + " " * (max_length - len(full_label) + 1)
print(line1)
print(line2)
```
```python out
'EU rejects German call to boycott British lamb .'
'B-ORG O B-MISC O O O B-MISC O O'
```
В качестве примера смешивания меток `B-` и `I-`, вот что дает тот же код для элемента обучающего множества с индексом 4:
```python out
'Germany \'s representative to the European Union \'s veterinary committee Werner Zwingmann said on Wednesday consumers should buy sheepmeat from countries other than Britain until the scientific advice was clearer .'
'B-LOC O O O O B-ORG I-ORG O O O B-PER I-PER O O O O O O O O O O O B-LOC O O O O O O O'
```
Как мы видим, сущностям, состоящим из двух слов, например "European Union" и "Werner Zwingmann", присваивается метка `B-` для первого слова и метка `I-` для второго.
<Tip>
✏️ **Попробуйте!** Выведите те же два предложения с метками POS или chunking.
</Tip>
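Если нужна подсказка: один из возможных вариантов для POS-меток — повторить тот же цикл, заменив столбец и список имен меток (набросок ниже предполагает, что столбец `pos_tags` устроен так же, как `ner_tags`):
```py
pos_label_names = raw_datasets["train"].features["pos_tags"].feature.names
words = raw_datasets["train"][0]["tokens"]
pos_labels = raw_datasets["train"][0]["pos_tags"]
line1, line2 = "", ""
for word, label in zip(words, pos_labels):
    full_label = pos_label_names[label]
    max_length = max(len(word), len(full_label))
    line1 += word + " " * (max_length - len(word) + 1)
    line2 += full_label + " " * (max_length - len(full_label) + 1)
print(line1)
print(line2)
```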
### Обработка данных[[processing-the-data]]
<Youtube id="iY2AZYdZAr0"/>
Как обычно, наши тексты должны быть преобразованы в идентификаторы токенов, прежде чем модель сможет понять их смысл. Как мы видели в [Главе 6](../chapter6/), существенным отличием задачи классификации токенов является то, что у нас есть предварительно токенизированные входные данные. К счастью, API токенизатора справляется с этим довольно легко; нам просто нужно предупредить `tokenizer` специальным флагом.
Для начала давайте создадим объект `tokenizer`. Как мы уже говорили, мы будем использовать предварительно обученную модель BERT, поэтому начнем с загрузки и кэширования соответствующего токенизатора:
```python
from transformers import AutoTokenizer
model_checkpoint = "bert-base-cased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
```
Вы можете заменить `model_checkpoint` на любую другую модель из [Hub](https://huggingface.co/models) или на локальную папку, в которой вы сохранили предварительно обученную модель и токенизатор. Единственное ограничение - токенизатор должен быть создан с помощью библиотеки 🤗 Tokenizers, поэтому существует "быстрая" версия. Вы можете увидеть все архитектуры, которые поставляются с быстрой версией в [этой большой таблице](https://huggingface.co/transformers/#supported-frameworks), а чтобы проверить, что используемый вами объект `tokenizer` действительно поддерживается 🤗 Tokenizers, вы можете посмотреть на его атрибут `is_fast`:
```py
tokenizer.is_fast
```
```python out
True
```
Для токенизации предварительно токенизированного ввода мы можем использовать наш `tokenizer`, как обычно, просто добавив `is_split_into_words=True`:
```py
inputs = tokenizer(raw_datasets["train"][0]["tokens"], is_split_into_words=True)
inputs.tokens()
```
```python out
['[CLS]', 'EU', 'rejects', 'German', 'call', 'to', 'boycott', 'British', 'la', '##mb', '.', '[SEP]']
```
Как мы видим, токенизатор добавил специальные токены, используемые моделью (`[CLS]` в начале и `[SEP]` в конце), и оставил большинство слов нетронутыми. Слово `lamb`, однако, было токенизировано на два подслова, `la` и `##mb`. Это вносит несоответствие между нашими входными данными и метками: список меток состоит всего из 9 элементов, в то время как наши входные данные теперь содержат 12 токенов. Учесть специальные токены легко (мы знаем, что они находятся в начале и в конце), но нам также нужно убедиться, что мы выровняли все метки с соответствующими словами.
К счастью, поскольку мы используем быстрый токенизатор, у нас есть доступ к суперспособностям 🤗 Tokenizers, что означает, что мы можем легко сопоставить каждый токен с соответствующим словом (как показано в [Глава 6](../chapter6/3)):
```py
inputs.word_ids()
```
```python out
[None, 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, None]
```
Немного поработав, мы сможем расширить список меток, чтобы он соответствовал токенам. Первое правило, которое мы применим, заключается в том, что специальные токены получают метку `-100`. Это связано с тем, что по умолчанию `-100` - это индекс, который игнорируется в функции потерь, которую мы будем использовать (кросс-энтропия). Затем каждый токен получает ту же метку, что и токен, с которого началось слово, в котором он находится, поскольку они являются частью одной и той же сущности. Для токенов, находящихся внутри слова, но не в его начале, мы заменяем `B-` на `I-` (поскольку такие токены не являются началом сущности):
```python
def align_labels_with_tokens(labels, word_ids):
new_labels = []
current_word = None
for word_id in word_ids:
if word_id != current_word:
# Начало нового слова!
current_word = word_id
label = -100 if word_id is None else labels[word_id]
new_labels.append(label)
elif word_id is None:
# Специальный токен
new_labels.append(-100)
else:
# То же слово, что и предыдущий токен
label = labels[word_id]
# Если метка B-XXX, заменяем ее на I-XXX
if label % 2 == 1:
label += 1
new_labels.append(label)
return new_labels
```
Давайте опробуем это на нашем первом предложении:
```py
labels = raw_datasets["train"][0]["ner_tags"]
word_ids = inputs.word_ids()
print(labels)
print(align_labels_with_tokens(labels, word_ids))
```
```python out
[3, 0, 7, 0, 0, 0, 7, 0, 0]
[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, 0, -100]
```
Как мы видим, наша функция добавила `-100` для двух специальных токенов в начале и в конце и новый `0` для нашего слова, которое было разбито на две части.
<Tip>
✏️ **Попробуйте!** Некоторые исследователи предпочитают назначать только одну метку на слово и присваивать `-100` другим подтокенам в данном слове. Это делается для того, чтобы длинные слова, часть которых состоит из множества субтокенов, не вносили значительный вклад в потери.
</Tip>
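Один из возможных набросков такой альтернативы — оставлять метку только первому токену каждого слова, а остальным подтокенам присваивать `-100`:
```py
def align_labels_first_token_only(labels, word_ids):
    new_labels = []
    current_word = None
    for word_id in word_ids:
        if word_id is None:
            # Специальный токен
            new_labels.append(-100)
        elif word_id != current_word:
            # Первый токен нового слова получает метку этого слова
            current_word = word_id
            new_labels.append(labels[word_id])
        else:
            # Остальные подтокены игнорируются при вычислении потерь
            new_labels.append(-100)
    return new_labels
```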
Чтобы предварительно обработать весь наш датасет, нам нужно провести токенизацию всех входных данных и применить `align_labels_with_tokens()` ко всем меткам. Чтобы воспользоваться преимуществами скорости нашего быстрого токенизатора, лучше всего токенизировать много текстов одновременно, поэтому мы напишем функцию, которая обрабатывает список примеров и использует метод `Dataset.map()` с параметром `batched=True`. Единственное отличие от нашего предыдущего примера заключается в том, что функция `word_ids()` должна получить индекс примера, идентификаторы слов которого нам нужны, с учётом того что входными данными для токенизатора являются списки текстов (или, в нашем случае, списки слов), поэтому мы добавляем и это:
```py
def tokenize_and_align_labels(examples):
tokenized_inputs = tokenizer(
examples["tokens"], truncation=True, is_split_into_words=True
)
all_labels = examples["ner_tags"]
new_labels = []
for i, labels in enumerate(all_labels):
word_ids = tokenized_inputs.word_ids(i)
new_labels.append(align_labels_with_tokens(labels, word_ids))
tokenized_inputs["labels"] = new_labels
return tokenized_inputs
```
Обратите внимание, что мы еще не добавляли во входные данные дополняющие токены; мы сделаем это позже, при создании батчей с помощью коллатора данных.
Теперь мы можем применить всю эту предварительную обработку к другим частям нашего датасета:
```py
tokenized_datasets = raw_datasets.map(
tokenize_and_align_labels,
batched=True,
remove_columns=raw_datasets["train"].column_names,
)
```
Мы сделали самую сложную часть! Теперь, когда данные прошли предварительную обработку, само обучение будет выглядеть примерно так, как мы делали это в [Главе 3](../chapter3/1).
{#if fw === 'pt'}
## Дообучение модели с помощью API `Trainer`[[fine-tuning-the-model-with-the-trainer-api]]
Фактический код, использующий `Trainer`, будет таким же, как и раньше; единственные изменения - это способ объединения данных в батч и функция вычисления метрики.
{:else}
## Дообучение модели с помощью Keras[[fine-tuning-the-model-with-keras]]
Фактический код, использующий Keras, будет очень похож на предыдущий; единственные изменения - это способ объединения данных в батч и функция вычисления метрики.
{/if}
### Сопоставление данных[[data-collation]]
Мы не можем просто использовать `DataCollatorWithPadding`, как в [Главе 3](../chapter3/1), потому что в этом случае дополняются только входные данные (идентификаторы входов, маска внимания и идентификаторы типов токенов). Здесь наши метки должны быть дополнены точно так же, как и входы, чтобы они оставались одного размера, используя `-100` в качестве значения, чтобы соответствующие прогнозы игнорировались при вычислении потерь.
Все это делает [`DataCollatorForTokenClassification`](https://huggingface.co/transformers/main_classes/data_collator.html#datacollatorfortokenclassification). Как и `DataCollatorWithPadding`, он принимает `tokenizer`, используемый для предварительной обработки входных данных:
{#if fw === 'pt'}
```py
from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
```
{:else}
```py
from transformers import DataCollatorForTokenClassification
data_collator = DataCollatorForTokenClassification(
tokenizer=tokenizer, return_tensors="tf"
)
```
{/if}
Чтобы проверить его на нескольких примерах, мы можем просто вызвать его на списке примеров из нашего токенизированного обучающего набора:
```py
batch = data_collator([tokenized_datasets["train"][i] for i in range(2)])
batch["labels"]
```
```python out
tensor([[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, 0, -100],
[-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100]])
```
Давайте сравним это с метками для первого и второго элементов в нашем датасете:
```py
for i in range(2):
print(tokenized_datasets["train"][i]["labels"])
```
```python out
[-100, 3, 0, 7, 0, 0, 0, 7, 0, 0, 0, -100]
[-100, 1, 2, -100]
```
{#if fw === 'pt'}
Как мы видим, второй набор меток был дополнен до длины первого с помощью значения `-100`.
{:else}
Наш коллатор данных готов к работе! Теперь давайте используем его для создания датасета `tf.data.Dataset` с помощью метода `to_tf_dataset()`. Вы также можете использовать `model.prepare_tf_dataset()`, чтобы сделать это с меньшим количеством кода - вы увидите это в некоторых других разделах этой главы.
```py
tf_train_dataset = tokenized_datasets["train"].to_tf_dataset(
columns=["attention_mask", "input_ids", "labels", "token_type_ids"],
collate_fn=data_collator,
shuffle=True,
batch_size=16,
)
tf_eval_dataset = tokenized_datasets["validation"].to_tf_dataset(
columns=["attention_mask", "input_ids", "labels", "token_type_ids"],
collate_fn=data_collator,
shuffle=False,
batch_size=16,
)
```
Следующая остановка: сама модель.
{/if}
{#if fw === 'tf'}
### Определение модели[[defining-the-model]]
Поскольку мы работаем над проблемой классификации токенов, мы будем использовать класс `TFAutoModelForTokenClassification`. Главное, что нужно помнить при определении этой модели, - это передать информацию о количестве имеющихся у нас меток. Проще всего передать это число с помощью аргумента `num_labels`, но если мы хотим получить красивый виджет инференса, подобный тому, что мы видели в начале этого раздела, то лучше задать правильные соответствия меток.
Они должны быть заданы двумя словарями, `id2label` и `label2id`, которые содержат отображение идентификатора в метку и наоборот:
```py
id2label = {i: label for i, label in enumerate(label_names)}
label2id = {v: k for k, v in id2label.items()}
```
Теперь мы можем просто передать их в метод `TFAutoModelForTokenClassification.from_pretrained()`, и они будут заданы в конфигурации модели, затем правильно сохранены и загружены в Hub:
```py
from transformers import TFAutoModelForTokenClassification
model = TFAutoModelForTokenClassification.from_pretrained(
model_checkpoint,
id2label=id2label,
label2id=label2id,
)
```
Как и при определении `TFAutoModelForSequenceClassification` в [Главе 3](../chapter3/1), при создании модели выдается предупреждение о том, что некоторые веса не были использованы (веса из предварительно обученной головы), а другие веса инициализированы случайно (веса из новой головы классификации токенов), и что эту модель нужно обучить. Мы сделаем это через минуту, но сначала давайте перепроверим, что наша модель имеет правильное количество меток:
```python
model.config.num_labels
```
```python out
9
```
<Tip warning={true}>
⚠️ Если у вас есть модель с неправильным количеством меток, то при последующем вызове `model.fit()` вы получите непонятную ошибку. Это может вызвать раздражение при отладке, поэтому обязательно выполните эту проверку, чтобы убедиться, что у вас есть ожидаемое количество меток.
</Tip>
### Дообучение модели[[fine-tuning-the-model]]
Теперь мы готовы к обучению нашей модели! Однако сначала нам нужно сделать еще немного работы: войти в Hugging Face и определить гиперпараметры обучения. Если вы работаете в блокноте, есть удобная функция, которая поможет вам в этом:
```python
from huggingface_hub import notebook_login
notebook_login()
```
Появится виджет, в котором вы можете ввести свои учетные данные для входа в Hugging Face.
Если вы работаете не в блокноте, просто введите следующую строку в терминале:
```bash
huggingface-cli login
```
После входа в аккаунт мы можем подготовить все необходимое для компиляции нашей модели. 🤗 Transformers предоставляет удобную функцию `create_optimizer()`, которая создаст вам оптимизатор `AdamW` с соответствующими настройками затухания весов и затухания скорости обучения, что позволит улучшить качество вашей модели по сравнению со встроенным оптимизатором `Adam`:
```python
from transformers import create_optimizer
import tensorflow as tf
# Обучение со смешанной точностью float16
# Закомментируйте эту строку, если вы используете GPU, которому это не принесет никаких преимуществ
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# Количество шагов обучения - это количество примеров в датасете, разделенное на размер батча, затем умноженное
# на общее количество эпох. Обратите внимание, что tf_train_dataset здесь - это разбитое на батчи tf.data.Dataset,
# а не оригинальный датасет Hugging Face, поэтому его len() уже равен num_samples // batch_size.
num_epochs = 3
num_train_steps = len(tf_train_dataset) * num_epochs
optimizer, schedule = create_optimizer(
init_lr=2e-5,
num_warmup_steps=0,
num_train_steps=num_train_steps,
weight_decay_rate=0.01,
)
model.compile(optimizer=optimizer)
```
Обратите внимание, что мы не указываем аргумент `loss` в `compile()`. Это связано с тем, что модели могут вычислять потери внутри себя - если вы компилируете без потерь и предоставляете свои метки во входном словаре (как мы делаем в наших датасетах), то модель будет обучаться, используя эти внутренние потери, которые будут соответствовать задаче и типу выбранной вами модели.
Далее мы определяем `PushToHubCallback` для загрузки нашей модели в Hub во время обучения и обучаем модель с этим обратным вызовом:
```python
from transformers.keras_callbacks import PushToHubCallback
callback = PushToHubCallback(output_dir="bert-finetuned-ner", tokenizer=tokenizer)
model.fit(
tf_train_dataset,
validation_data=tf_eval_dataset,
callbacks=[callback],
epochs=num_epochs,
)
```
С помощью аргумента `hub_model_id` можно указать полное имя репозитория, в который вы хотите передать модель (в частности, этот аргумент нужно использовать, чтобы передать модель в организацию). Например, когда мы отправили модель в [организацию `huggingface-course`](https://huggingface.co/huggingface-course), мы добавили `hub_model_id="huggingface-course/bert-finetuned-ner"`. По умолчанию используемое хранилище будет находиться в вашем пространстве имен и называться в соответствии с заданной вами выходной директорией, например `"cool_huggingface_user/bert-finetuned-ner"`.
<Tip>
💡 Если выходной каталог, который вы используете, уже существует, он должен быть локальным клоном репозитория, в который вы хотите выполнить push. Если это не так, вы получите ошибку при вызове `model.fit()` и должны будете задать новое имя.
</Tip>
Обратите внимание, что во время обучения каждый раз, когда модель сохраняется (здесь - каждую эпоху), она загружается на хаб в фоновом режиме. Таким образом, при необходимости вы сможете возобновить обучение на другой машине.
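Например, последнюю загруженную на Hub версию можно получить примерно так (имя репозитория ниже — лишь пример, подставьте свое):
```py
from transformers import TFAutoModelForTokenClassification
model = TFAutoModelForTokenClassification.from_pretrained(
    "your-username/bert-finetuned-ner"
)
```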
На этом этапе вы можете использовать виджет инференса на Model Hub, чтобы протестировать свою модель и поделиться ею с друзьями. Вы успешно дообучили модель для задачи классификации токенов - поздравляем! Но насколько хороша наша модель на самом деле? Чтобы выяснить это, нам следует оценить некоторые метрики.
{/if}
### Метрики[[metrics]]
{#if fw === 'pt'}
Чтобы `Trainer` вычислял метрику каждую эпоху, нам нужно определить функцию `compute_metrics()`, которая принимает массивы прогнозов и меток и возвращает словарь с именами и значениями метрик.
Традиционно для оценки прогнозирования классификации токенов используется библиотека [*seqeval*](https://github.com/chakki-works/seqeval). Чтобы использовать эту метрику, сначала нужно установить библиотеку *seqeval*:
```py
!pip install seqeval
```
Мы можем загрузить ее с помощью функции `evaluate.load()`, как мы это делали в [Главе 3](../chapter3/1):
{:else}
Традиционно для оценки прогнозирования классификации токенов используется библиотека [*seqeval*](https://github.com/chakki-works/seqeval). Чтобы использовать эту метрику, сначала нужно установить библиотеку *seqeval*:
```py
!pip install seqeval
```
Мы можем загрузить ее с помощью функции `evaluate.load()`, как мы это делали в [Главе 3](../chapter3/1):
{/if}
```py
import evaluate
metric = evaluate.load("seqeval")
```
Эта метрика ведет себя не так, как стандартная accuracy: на самом деле она принимает списки меток как строки, а не как целые числа, поэтому нам нужно полностью декодировать прогноз и метки перед передачей их в метрику. Давайте посмотрим, как это работает. Сначала мы получим метки для нашего первого обучающего примера:
```py
labels = raw_datasets["train"][0]["ner_tags"]
labels = [label_names[i] for i in labels]
labels
```
```python out
['B-ORG', 'O', 'B-MISC', 'O', 'O', 'O', 'B-MISC', 'O', 'O']
```
Затем мы можем создать фальшивые прогнозы для них, просто изменив значение в индексе 2:
```py
predictions = labels.copy()
predictions[2] = "O"
metric.compute(predictions=[predictions], references=[labels])
```
Обратите внимание, что метрика принимает список прогнозов (не только один) и список меток. Вот результат:
```python out
{'MISC': {'precision': 1.0, 'recall': 0.5, 'f1': 0.67, 'number': 2},
'ORG': {'precision': 1.0, 'recall': 1.0, 'f1': 1.0, 'number': 1},
'overall_precision': 1.0,
'overall_recall': 0.67,
'overall_f1': 0.8,
'overall_accuracy': 0.89}
```
{#if fw === 'pt'}
Она возвращает огромное количество информации! Мы получаем оценки precision, recall и F1 для каждой отдельной сущности, а также в целом. Для расчета метрик мы сохраним только общую оценку, но вы можете настроить функцию `compute_metrics()` так, чтобы она возвращала все метрики, которые вы хотите получить.
Эта функция `compute_metrics()` сначала берет argmax логитов, чтобы преобразовать их в прогнозы (как обычно, логиты и вероятности расположены в том же порядке, поэтому нам не нужно применять softmax). Затем нам нужно преобразовать метки и прогнозы из целых чисел в строки. Мы удаляем все значения, для которых метка равна `-100`, а затем передаем результаты в метод `metric.compute()`:
```py
import numpy as np
def compute_metrics(eval_preds):
logits, labels = eval_preds
predictions = np.argmax(logits, axis=-1)
# Удаляем игнорируемый индекс (специальные токены) и преобразуем в метки
true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
true_predictions = [
[label_names[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
all_metrics = metric.compute(predictions=true_predictions, references=true_labels)
return {
"precision": all_metrics["overall_precision"],
"recall": all_metrics["overall_recall"],
"f1": all_metrics["overall_f1"],
"accuracy": all_metrics["overall_accuracy"],
}
```
Теперь, когда это сделано, мы почти готовы к определению нашего `Trainer`. Нам просто нужна `model`, чтобы дообучить ее!
{:else}
Она возвращает огромное количество информации! Мы получаем оценки precision, recall и F1 для каждой отдельной сущности, а также в целом. Теперь давайте посмотрим, что произойдет, если мы попробуем использовать реальные прогнозы модели для вычисления реальных оценок.
TensorFlow не любит конкатенировать наши прогнозы, поскольку они имеют переменную длину последовательности. Это означает, что мы не можем просто использовать `model.predict()` - но это нас не остановит. Мы будем получать прогнозы по батчу за раз и конкатенировать их в один большой длинный список по мере продвижения, отбрасывая токены `-100`, которые указывают на маскирование/дополнение, а затем вычислять метрики для списка в конце:
```py
import numpy as np
all_predictions = []
all_labels = []
for batch in tf_eval_dataset:
logits = model.predict_on_batch(batch)["logits"]
labels = batch["labels"]
predictions = np.argmax(logits, axis=-1)
for prediction, label in zip(predictions, labels):
for predicted_idx, label_idx in zip(prediction, label):
if label_idx == -100:
continue
all_predictions.append(label_names[predicted_idx])
all_labels.append(label_names[label_idx])
metric.compute(predictions=[all_predictions], references=[all_labels])
```
```python out
{'LOC': {'precision': 0.91, 'recall': 0.92, 'f1': 0.91, 'number': 1668},
'MISC': {'precision': 0.70, 'recall': 0.79, 'f1': 0.74, 'number': 702},
'ORG': {'precision': 0.85, 'recall': 0.90, 'f1': 0.88, 'number': 1661},
'PER': {'precision': 0.95, 'recall': 0.95, 'f1': 0.95, 'number': 1617},
'overall_precision': 0.87,
'overall_recall': 0.91,
'overall_f1': 0.89,
'overall_accuracy': 0.97}
```
Как ваша модель показала себя по сравнению с нашей? Если вы получили похожие цифры, значит, ваше обучение прошло успешно!
{/if}
{#if fw === 'pt'}
### Определение модели[[defining-the-model]]
Поскольку мы работаем над проблемой классификации токенов, мы будем использовать класс `AutoModelForTokenClassification`. Главное, что нужно помнить при определении этой модели, - это передать информацию о количестве имеющихся у нас меток. Проще всего передать это число с помощью аргумента `num_labels`, но если мы хотим получить красивый виджет инференса, подобный тому, что мы видели в начале этого раздела, то лучше задать правильное сопоставление меток.
Оно должно задаваться двумя словарями, `id2label` и `label2id`, которые содержат соответствие между идентификатором и меткой и наоборот:
```py
id2label = {i: label for i, label in enumerate(label_names)}
label2id = {v: k for k, v in id2label.items()}
```
Теперь мы можем просто передать их в метод `AutoModelForTokenClassification.from_pretrained()`, и они будут заданы в конфигурации модели, а затем правильно сохранены и загружены в Hub:
```py
from transformers import AutoModelForTokenClassification
model = AutoModelForTokenClassification.from_pretrained(
model_checkpoint,
id2label=id2label,
label2id=label2id,
)
```
Как и в случае определения `AutoModelForSequenceClassification` в [Главе 3](../chapter3/1), при создании модели выдается предупреждение о том, что некоторые веса не были использованы (те, что были получены из предварительно обученной головы), а другие инициализированы случайно (те, что были получены из новой головы классификации токенов), и что эту модель необходимо обучить. Мы сделаем это через минуту, но сначала давайте перепроверим, что наша модель имеет правильное количество меток:
```python
model.config.num_labels
```
```python out
9
```
<Tip warning={true}>
⚠️ Если у вас есть модель с неправильным количеством меток, то при последующем вызове метода `Trainer.train()` вы получите непонятную ошибку (что-то вроде "CUDA error: device-side assert triggered"). Это главная причина ошибок, о которых сообщают пользователи, поэтому обязательно выполните эту проверку, чтобы убедиться, что у вас есть ожидаемое количество меток.
</Tip>
### Дообучение модели[[fine-tuning-the-model]]
Теперь мы готовы к обучению нашей модели! Нам осталось сделать две последние вещи, прежде чем мы определим наш `Trainer`: войти в Hugging Face и определить наши аргументы для обучения. Если вы работаете в блокноте, есть удобная функция, которая поможет вам в этом:
```python
from huggingface_hub import notebook_login
notebook_login()
```
Появится виджет, в котором вы можете ввести свои учетные данные для входа в Hugging Face.
Если вы работаете не в блокноте, просто введите следующую строку в терминале:
```bash
huggingface-cli login
```
Как только это будет сделано, мы сможем определить наши `TrainingArguments`:
```python
from transformers import TrainingArguments
args = TrainingArguments(
"bert-finetuned-ner",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
push_to_hub=True,
)
```
Большинство из них вы уже видели: мы задаем некоторые гиперпараметры (например, скорость обучения, количество эпох для обучения и затухание весов) и указываем `push_to_hub=True`, чтобы указать, что мы хотим сохранить модель и оценить ее в конце каждой эпохи, а также что мы хотим загрузить наши результаты в Model Hub. Обратите внимание, что с помощью аргумента `hub_model_id` можно указать имя репозитория, в который вы хотите передать модель (в частности, этот аргумент нужно использовать, чтобы передать модель в организацию). Например, когда мы передавали модель в [организацию `huggingface-course`](https://huggingface.co/huggingface-course), мы добавили `hub_model_id="huggingface-course/bert-finetuned-ner"` в `TrainingArguments`. По умолчанию используемый репозиторий будет находиться в вашем пространстве имен и называться в соответствии с заданным вами выходным каталогом, так что в нашем случае это будет `"sgugger/bert-finetuned-ner"`.
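Для иллюстрации (идентификатор репозитория здесь условный, подставьте свой) это могло бы выглядеть так:
```py
args = TrainingArguments(
    "bert-finetuned-ner",
    hub_model_id="huggingface-course/bert-finetuned-ner",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=2e-5,
    num_train_epochs=3,
    weight_decay=0.01,
    push_to_hub=True,
)
```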
<Tip>
💡 Если выходной каталог, который вы используете, уже существует, он должен быть локальным клоном репозитория, в который вы хотите передать модель. Если это не так, вы получите ошибку при определении вашего `Trainer` и должны будете задать новое имя.
</Tip>
Наконец, мы просто передаем все в `Trainer` и запускаем обучение:
```python
from transformers import Trainer
trainer = Trainer(
model=model,
args=args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
)
trainer.train()
```
Обратите внимание, что во время обучения каждый раз, когда модель сохраняется (здесь - каждую эпоху), она загружается в Hub в фоновом режиме. Таким образом, при необходимости вы сможете возобновить обучение на другой машине.
После завершения обучения мы используем метод `push_to_hub()`, чтобы убедиться, что загружена самая последняя версия модели:
```py
trainer.push_to_hub(commit_message="Training complete")
```
Эта команда возвращает URL только что выполненного commit, если вы хотите его проверить:
```python out
'https://huggingface.co/sgugger/bert-finetuned-ner/commit/26ab21e5b1568f9afeccdaed2d8715f571d786ed'
```
`Trainer` также создает черновик карточки модели со всеми результатами оценки и загружает его. На этом этапе вы можете использовать виджет инференса на Model Hub, чтобы протестировать свою модель и поделиться ею с друзьями. Вы успешно дообучили модель для задачи классификации токенов - поздравляем!
Если вы хотите более глубоко погрузиться в цикл обучения, мы покажем вам, как сделать то же самое с помощью 🤗 Accelerate.
## Индивидуальный цикл обучения[[a-custom-training-loop]]
Теперь давайте рассмотрим полный цикл обучения, чтобы вы могли легко настроить нужные вам части. Он будет очень похож на тот, что мы делали в [Главе 3](../chapter3/4), с некоторыми изменениями для оценки.
### Подготовка всего к обучению[[preparing-everything-for-training]]
Сначала нам нужно создать `DataLoader` для наших датасетов. Мы используем наш `data_collator` в качестве `collate_fn` и перемешиваем обучающий набор, но не валидационный:
```py
from torch.utils.data import DataLoader
train_dataloader = DataLoader(
tokenized_datasets["train"],
shuffle=True,
collate_fn=data_collator,
batch_size=8,
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], collate_fn=data_collator, batch_size=8
)
```
Затем мы повторно инстанцируем нашу модель, чтобы убедиться, что мы не продолжаем дообучать модель, а снова начинаем с предварительно обученной модели BERT:
```py
model = AutoModelForTokenClassification.from_pretrained(
model_checkpoint,
id2label=id2label,
label2id=label2id,
)
```
Тогда нам понадобится оптимизатор. Мы будем использовать классический `AdamW`, который похож на `Adam`, но с исправлениями в способе применения затухания весов:
```py
from torch.optim import AdamW
optimizer = AdamW(model.parameters(), lr=2e-5)
```
Когда у нас есть все эти объекты, мы можем отправить их в метод `accelerator.prepare()`:
```py
from accelerate import Accelerator
accelerator = Accelerator()
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
```
<Tip>
🚨 Если вы обучаетесь на TPU, вам нужно будет перенести весь код, начиная с ячейки выше, в специальную функцию обучения. Подробнее смотрите [Главу 3](../chapter3/1).
</Tip>
Теперь, когда мы отправили наш `train_dataloader` в `accelerator.prepare()`, мы можем использовать его длину для вычисления количества шагов обучения. Помните, что это всегда нужно делать после подготовки загрузчика данных, так как этот метод изменит его длину. Мы используем классический линейный планировщик, уменьшающий скорость обучения до 0:
```py
from transformers import get_scheduler
num_train_epochs = 3
num_update_steps_per_epoch = len(train_dataloader)
num_training_steps = num_train_epochs * num_update_steps_per_epoch
lr_scheduler = get_scheduler(
"linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps,
)
```
Наконец, чтобы передать нашу модель в Hub, нам нужно создать объект `Repository` в рабочей папке. Сначала авторизуйтесь в Hugging Face, если вы еще не авторизованы. Мы определим имя репозитория по идентификатору модели, который мы хотим присвоить нашей модели (не стесняйтесь заменить `repo_name` на свой собственный выбор; он просто должен содержать ваше имя пользователя, что и делает функция `get_full_repo_name()`):
```py
from huggingface_hub import Repository, get_full_repo_name
model_name = "bert-finetuned-ner-accelerate"
repo_name = get_full_repo_name(model_name)
repo_name
```
```python out
'sgugger/bert-finetuned-ner-accelerate'
```
Затем мы можем клонировать этот репозиторий в локальную папку. Если она уже существует, эта локальная папка должна быть существующим клоном репозитория, с которым мы работаем:
```py
output_dir = "bert-finetuned-ner-accelerate"
repo = Repository(output_dir, clone_from=repo_name)
```
Теперь мы можем загрузить все, что сохранили в `output_dir`, вызвав метод `repo.push_to_hub()`. Это поможет нам загружать промежуточные модели в конце каждой эпохи.
### Цикл обучения[[training-loop]]
Теперь мы готовы написать полный цикл обучения. Чтобы упростить его оценочную часть, мы определяем функцию `postprocess()`, которая принимает прогнозы и метки и преобразует их в списки строк, как того ожидает наш объект `metric`:
```py
def postprocess(predictions, labels):
predictions = predictions.detach().cpu().clone().numpy()
labels = labels.detach().cpu().clone().numpy()
# Удаляем игнорируемый индекс (специальные токены) и преобразуем в метки
true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
true_predictions = [
[label_names[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
return true_labels, true_predictions
```
Затем мы можем написать цикл обучения. После определения прогресс-бара, чтобы следить за ходом обучения, цикл состоит из трех частей:
- Само обучение представляет собой классическую итерацию по `train_dataloader`, прямой проход по модели, затем обратный проход и шаг оптимизатора.
- Оценка, в которой есть новшество после получения выходов нашей модели на батче: поскольку два процесса могли дополнять входы и метки до разных форм, нам нужно использовать `accelerator.pad_across_processes()`, чтобы сделать прогнозы и метки одинаковой формы перед вызовом метода `gather()`. Если мы этого не сделаем, оценка либо завершится с ошибкой, либо зависнет навсегда. Затем мы отправляем результаты в `metric.add_batch()` и вызываем `metric.compute()` после завершения цикла оценки.
- Сохранение и загрузка, где мы сначала сохраняем модель и токенизатор, а затем вызываем `repo.push_to_hub()`. Обратите внимание, что мы используем аргумент `blocking=False`, чтобы указать библиотеке 🤗 Hub на выполнение push в асинхронном процессе. Таким образом, обучение продолжается нормально, а эта (длинная) инструкция выполняется в фоновом режиме.
Вот полный код цикла обучения:
```py
from tqdm.auto import tqdm
import torch
progress_bar = tqdm(range(num_training_steps))
for epoch in range(num_train_epochs):
# Обучение
model.train()
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
# Оценка
model.eval()
for batch in eval_dataloader:
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
labels = batch["labels"]
# Необходимо добавить предсказания и метки для gather
predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=-100)
labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)
predictions_gathered = accelerator.gather(predictions)
labels_gathered = accelerator.gather(labels)
true_predictions, true_labels = postprocess(predictions_gathered, labels_gathered)
metric.add_batch(predictions=true_predictions, references=true_labels)
results = metric.compute()
print(
f"epoch {epoch}:",
{
key: results[f"overall_{key}"]
for key in ["precision", "recall", "f1", "accuracy"]
},
)
# Сохранение и загрузка
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False
)
```
Если вы впервые видите модель, сохраненную с помощью 🤗 Accelerate, давайте рассмотрим три строки кода, которые сопровождают сохранение:
```py
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
```
Первая строка не требует пояснений: она указывает всем процессам подождать, пока все не окажутся на этой стадии, прежде чем продолжить работу. Это нужно для того, чтобы убедиться, что у нас одна и та же модель в каждом процессе перед сохранением. Затем мы берем `unwrapped_model`, которая является базовой моделью, которую мы определили. Метод `accelerator.prepare()` изменяет модель для работы в распределенном обучении, поэтому у нее больше не будет метода `save_pretrained()`; метод `accelerator.unwrap_model()` отменяет этот шаг. Наконец, мы вызываем `save_pretrained()`, но указываем этому методу использовать `accelerator.save()` вместо `torch.save()`.
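После этого сохраненную модель можно снова загрузить обычным способом — например, так (набросок; путь совпадает с `output_dir`, заданным выше):
```py
from transformers import AutoModelForTokenClassification, AutoTokenizer
model = AutoModelForTokenClassification.from_pretrained(output_dir)
tokenizer = AutoTokenizer.from_pretrained(output_dir)
```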
После того как это будет сделано, у вас должна получиться модель, выдающая результаты, очень похожие на те, что были обучены с помощью `Trainer`. Вы можете посмотреть модель, которую мы обучили с помощью этого кода, на [*huggingface-course/bert-finetuned-ner-accelerate*](https://huggingface.co/huggingface-course/bert-finetuned-ner-accelerate). А если вы хотите протестировать какие-либо изменения в цикле обучения, вы можете напрямую реализовать их, отредактировав код, показанный выше!
{/if}
## Использование дообученной модели[[using-the-fine-tuned-model]]
Мы уже показали вам, как можно использовать модель, которую мы дообучили на Model Hub, с помощью виджета инференса. Чтобы использовать ее локально в `pipeline`, нужно просто указать соответствующий идентификатор модели:
```py
from transformers import pipeline
# Замените это на свою собственную контрольную точку
model_checkpoint = "huggingface-course/bert-finetuned-ner"
token_classifier = pipeline(
"token-classification", model=model_checkpoint, aggregation_strategy="simple"
)
token_classifier("My name is Sylvain and I work at Hugging Face in Brooklyn.")
```
```python out
[{'entity_group': 'PER', 'score': 0.9988506, 'word': 'Sylvain', 'start': 11, 'end': 18},
{'entity_group': 'ORG', 'score': 0.9647625, 'word': 'Hugging Face', 'start': 33, 'end': 45},
{'entity_group': 'LOC', 'score': 0.9986118, 'word': 'Brooklyn', 'start': 49, 'end': 57}]
```
Отлично! Наша модель работает так же хорошо, как и модель по умолчанию для этого конвейера!
| course/chapters/ru/chapter7/2.mdx/0 | {
"file_path": "course/chapters/ru/chapter7/2.mdx",
"repo_id": "course",
"token_count": 35624
} | 143 |
<!-- DISABLE-FRONTMATTER-SECTIONS -->
# คำถามท้ายบท
<CourseFloatingBanner
chapter={1}
classNames="absolute z-10 right-0 top-0"
/>
บทนี้พูดถึงพื้นฐานค่อนข้างเยอะมาก ไม่ต้องกังวลไปหากคุณไม่เข้าใจรายละเอียดทั้งหมด บทหน้าจะช่วยอธิบายว่าแต่ละอย่างทำงานกันเบื้องหลังอย่างไร
ตอนนี้มาทดสอบกันดีกว่าว่าคุณได้เรียนรู้อะไรมาบ้างในบทนี้!
### 1. เปิดหา checkpoint `roberta-large-mnli` ใน Hub โมเดลนี้ใช้ในงานอะไร
<Question
choices={[
{
text: "การสรุปความ",
explain: "โปรดดูที่<a href=\"https://huggingface.co/roberta-large-mnli\">หน้าเพจ roberta-large-mnli</a>อีกครั้ง"
},
{
text: "การแยกแยะข้อความ",
explain: "โมเดลนี้แยกแยะว่าประโยคสองประโยคนั้นเข้าข่ายกรณีใดดังต่อไปนี้ (หักล้างกัน, กลาง, ส่งเสริมกัน) หรือเรียกอีกชื่อหนึ่งว่า<em>การอนุมาน</em>",
correct: true
},
{
text: "การสร้างข้อความ",
explain: "โปรดดูที่<a href=\"https://huggingface.co/roberta-large-mnli\">หน้าเพจ roberta-large-mnli</a>อีกครั้ง"
}
]}
/>
### 2. โค้ดต่อไปนี้ให้ผลลัพธ์ว่าอย่างไร?
```py
from transformers import pipeline
ner = pipeline("ner", grouped_entities=True)
ner("My name is Sylvain and I work at Hugging Face in Brooklyn.")
```
<Question
choices={[
{
text: "ได้ผลออกมาเป็นคะแนนระบุว่าประโยคดังกล่าวเป็นข้อความ \"ด้านบวก\" หรือ \"ด้านลบ\" ",
explain: "ข้อนี้ผิด — ผลลัพธ์นี้ได้จาก pipeline <code>sentiment-analysis</code>"
},
{
text: "ได้ผลออกมาเป็นข้อความที่ทำให้ประโยคสมบูรณ์",
explain: "ข้อนี้ผิด — ผลลัพธ์นี้ได้จาก pipeline <code>text-generation</code>",
},
{
text: "ได้ผลออกมาระบุว่าคำใดเป็นบุคคล, องค์กร, หรือสถานที่",
explain: "หากตั้งค่าว่า <code>grouped_entities=True</code> จะสามารถรวมคำหลายคำที่ระบุสิ่งเดียวกันไว้ได้ เช่น \"Hugging Face\" ประกอบด้วยคำสองคำ แต่ระบุถึงสิ่งสิ่งเดียว",
correct: true
}
]}
/>
### 3. เราควรแทนค่า ... ในโค้ดด้านล่างว่าอะไร?
```py
from transformers import pipeline
filler = pipeline("fill-mask", model="bert-base-cased")
result = filler("...")
```
<Question
choices={[
{
text: "This <mask> has been waiting for you.",
explain: "ข้อนี้ผิด โปรดดูรายละเอียดของโมเดล <code>bert-base-cased</code> แล้วลองตรวจสอบว่าทำผิดตรงไหนไป"
},
{
text: "This [MASK] has been waiting for you.",
explain: "ถูกต้อง! โมเดลนี้ เว้นช่องว่างด้วยโทเคน [MASK]",
correct: true
},
{
text: "This man has been waiting for you.",
explain: "ข้อนี้ผิด pipeline ระบุว่าทำงาน `fill-mask` ซึ่งก็คือการเติมคำในช่องว่าง แต่ไม่มีโทเคนใดระบุช่องว่างในประโยคเลย"
}
]}
/>
### 4. ทำไมโค้ดด้านล่างรันไม่ออก?
```py
from transformers import pipeline
classifier = pipeline("zero-shot-classification")
result = classifier("This is a course about the Transformers library")
```
<Question
choices={[
{
text: "pipline นี้ต้องการระบุ label เพื่อใช้ในการแยกแยะประโยค",
explain: "ถูกต้อง โค้ดที่รันออกจะต้องเพิ่ม <code>candidate_labels=[...]</code> เข้าไปด้วย",
correct: true
},
{
text: "pipeline นี้ต้องการประโยค input มากกว่าหนึ่งประโยค",
explain: "ข้อนี้ผิด ถึงแม้ว่าความจริงแล้วจะสามารถใส่ประโยคหลายประโยคเป็น list เข้าไปเป็น input เพื่อรันได้(เหมือน pipeline อื่น ๆ)"
},
{
text: "library 🤗 Transformers พังแบบงง ๆ เหมือนทุกทีน่ะแหละ",
explain: "ขออนุญาตงดแสดงความคิดเห็นกับคนเลือกข้อนี้นะ"
},
{
text: "pipeline นี้ต้องการประโยค input ที่ยาวกว่านี้ ประโยคนี้สั้นเกินไป",
explain: "ข้อนี้ผิด และหากใส่ประโยคที่ยาวเกินไปใน pipeline นี้ก็จะโดนตัดให้สั้นลงอยู่ดี"
}
]}
/>
### 5. "transfer learning" (การเรียนรู้แบบส่งต่อ) หมายความว่าอย่างไร?
<Question
choices={[
{
text: "ส่งต่อความรู้จาก pretrain model ไปยังโมเดลใหม่โดยเทรนกับข้อมูลเดิม",
explain: "ข้อนี้ผิด หากทำแบบนี้ก็จะเป็นการสร้างโมเดลเดิมสองครั้ง"
},
{
text: "ส่งต่อความรู้จาก pretrain model ไปยังโมเดลใหม่โดยโมเดลใหม่นี้จะเริ่มต้นการเทรนจาก weight ของโมเดลแรก",
explain: "ถูกต้อง พอเราเริ่มเทรนโมเดลกับงานใหม่ ความรู้จะถูก*ส่งต่อ*มาจากจากโมเดลแรก",
correct: true
},
{
text: "ส่งต่อความรู้จาก pretrain model ไปยังโมเดลใหม่โดยสร้างโมเดลใหม่ด้วยสถาปัตยกรรมเดียวกับโมเดลแรก",
explain: "สถาปัตยกรรมเป็นเพียงวิธีการสร้างโมเดล ไม่มีความรู้ใด ๆ รวมอยู่ข้างใน"
}
]}
/>
### 6. ประโยคต่อไปนี้ถูกหรือผิด? โมเดลบริบทภาษาเป็นการเทรนล่วงหน้าที่ไม่ต้องการ label ในการเทรน
<Question
choices={[
{
text: "ถูก",
explain: "การเทรนล่วงหน้านั้นส่วนใหญ่จะเป็นการ <em>self-supervise</em> นั่นคือ label จะถูกสร้างขึ้นอัตโนมัติจาก input เอง (เช่นการทำนายคำต่อไปในข้อความ หรือเติมคำในช่องว่าง)",
correct: true
},
{
text: "ผิด",
explain: "คำตอบนี้ผิด"
}
]}
/>
### 7. โปรดเลือกประโยคที่อธิบายคำว่า "model", "architecture" และ "weight" ได้อย่างถูกต้อง"
<Question
choices={[
{
text: "หากเปรียบ model เป็นตึก architecture ก็เป็นแผนผัง และ weight ก็เป็นผู้คนด้านใน",
explain: "หากเทียบตามคำเปรียบเปรยนี้ weight ควรจะเป็นอิฐหรือวัสดุอื่น ๆ ในการสร้างตึกมากกว่า"
},
{
text: "หากเปรียบ architecture เป็นแผนที่ในการสร้าง model ค่า weight แต่ละค่าก็เป็นเหมือนเมืองต่าง ๆ ในแผนที่",
explain: "ข้อนี้เปรียบเทียบได้ไม่ถูกต้องเท่าไหร่ เพราะว่าจะมีเพียงเมืองเดียวที่อยู่บนแผนที่ที่ตำแหน่งเดียวกัน (ตัวอย่างเช่น มีเมืองเพียงเมืองเดียวในฝรั่งเศสที่ชื่อปารีส) สำหรับ architecture ใด ๆ ค่าชุด weight สามารถตั้งค่าหลากหลายแตกต่างกันได้"
},
{
text: "architecture คือฟังก์ชันทางคณิตศาสตร์ที่ใช้ในการสร้าง model โดย weight ก็คือค่าคงที่ที่ใช้ในฟังก์ชันเหล่านั้น",
explain: "ฟังก์ชันทางคณิตศาสตร์ชุดเดิม (architecture) สามารถใช้ในการสร้าง model ต่าง ๆ กันได้โดยใช้ค่าคงที่ (weight) ที่แตกต่างกัน",
correct: true
}
]}
/>
### 8. โมเดลใดต่อไปนี้เหมาะสมในการใช้สำหรับงานสร้างคำที่หายไปในประโยค?
<Question
choices={[
{
text: "โมเดล encoder",
explain: "โมเดล encoder สร้างตัวแทนของประโยคทั้งประโยค เหมาะสำหรับงานเช่น การแยกแยะประเภทของประโยค"
},
{
text: "โมเดล decoder",
explain: "โมเดล decoder เหมาะสำหรับงานสร้างคำที่หายไปจากข้อความในประโยคมากที่สุด",
correct: true
},
{
text: "โมเดล sequence-to-sequence",
explain: "โมเดล sequence-to-sequence เหมาะสำหรับงานที่ต้องการสร้างประโยคที่มีความสัมพันธ์กับประโยคที่ใส่เข้ามา ไม่ใช่แค่คำบางคำจากในประโยค"
}
]}
/>
### 9. โมเดลประเภทใดต่อไปนี้เหมาะสำหรับงานในการสรุปความ?
<Question
choices={[
{
text: "โมเดล encoder",
explain: "โมเดล encoder สร้างตัวแทนของประโยคทั้งประโยค เหมาะสำหรับงานเช่น การแยกแยะประเภทของประโยค"
},
{
text: "โมเดล decoder",
explain: "โมเดล decoder สามารถใช้ในการสร้างข้อความได้ (เช่น ข้อความสรุป) แต่โมเดลนี้ไม่สามารถเข้าใจข้อความทั้งหมดเพื่อทำการสรุปได้"
},
{
text: "โมเดล sequence-to-sequence",
explain: "โมเดล sequence-to-sequence เหมาะสำหรับงานสรุปความที่สุด",
correct: true
}
]}
/>
### 10. โมเดลประเภทใดต่อไปนี้เหมาะสำหรับงานในการแยกแยะประเภทประโยคตาม label ที่กำหนดให้?
<Question
choices={[
{
text: "โมเดล encoder",
explain: "โมเดล encoder สร้างตัวแทนของประโยคทั้งประโยค เหมาะสำหรับงานเช่น การแยกแยะประเภทของประโยคแบบนี้ที่สุด",
correct: true
},
{
text: "โมเดล decoder",
explain: "โมเดล decoder เหมาะสำหรับงานในการสร้างข้อความ ไม่เหมาะสำหรับการสกัด label ออกจากประโยคแบบนี้"
},
{
text: "โมเดล sequence-to-sequence",
explain: "โมเดล sequence-to-sequence เหมาะสำหรับงานที่คุณต้องการสร้างข้อความจากประโยค input ไม่ใช่จาก label",
}
]}
/>
### 11. อคติของโมเดลสามารถเกิดได้จากข้อใดต่อไปนี้ได้บ้าง?
<Question
choices={[
{
text: "fine-tune โมเดลมาจากโมเดล pretrain ทำให้โมเดลที่ fine-tune นั้นรับอคติมาจากโมเดล pretrain",
explain: "เมื่อคุณใช้งาน transfer learning อคติจากโมเดล pretrain จะส่งต่อไปยังโมเดลที่ fine-tune",
correct: true
},
{
text: "ข้อมูลที่ใช้เทรนโมเดลเป็นข้อมูลที่มีอคติปนอยู่",
explain: "ข้อนี้เป็นแหล่งกำเนิดอคติที่ชัดที่สุด แต่ว่าไม่ได้มีข้อนี้ข้อเดียว",
correct: true
},
{
text: "metric ที่ใช้วัดระหว่างการ optimize มีอคติปนอยู่",
explain: "ข้อนี้อาจดูไม่เหมือนว่าจะทำให้เกิดอคติในโมเดล แต่ว่าโมเดลของคุณจะปรับปรุงตัวเองไปเรื่อย ๆ ระหว่างเทรนตาม metric ที่เราเลือกโดยไม่มีการคิดซ้ำอีกครั้ง",
correct: true
}
]}
/>
| course/chapters/th/chapter1/10.mdx/0 | {
"file_path": "course/chapters/th/chapter1/10.mdx",
"repo_id": "course",
"token_count": 9848
} | 144 |
<FrameworkSwitchCourse {fw} />
<!-- DISABLE-FRONTMATTER-SECTIONS -->
# แบบทดสอบท้ายบท
<CourseFloatingBanner
chapter={2}
classNames="absolute z-10 right-0 top-0"
/>
### 1. ลำดับขั้นตอนใน pipeline ของการทำโมเดลด้านภาษา(language modeling)เป็นอย่างไร ?
<Question
choices={[
{
text: "ขั้นตอนแรก โมเดลจะทำการประมวลผลข้อความและให้ผลการทำนายออกมา หลังจากนั้น tokenizer จะทำการวิเคราะห์ผลการทำนายเหล่านี้และแปลงมันกลับมาเป็นข้อความเมื่อจำเป็น",
explain: "โมเดลไม่สามารถเข้าใจข้อความได้! tokenizer จำเป็นต้องทำการ tokenize ข้อความก่อนและแปลงมันไปเป็น IDs ที่โมเดลสามารถเข้าใจมันได้"
},
{
text: "ขั้นตอนแรก tokenizer จะประมวลผลข้อความและให้ IDs ออกมา หลังจากนั้นโมเดลจะประมวลผล IDs เหล่านี้และให้ผลการทำนายออกมา ซึ่งอาจจะเป็นข้อความบางอย่าง",
explain: "ผลการทำนายของโมเดลไม่สามารถเป็นข้อความออกมาได้โดยตรง tokenizer จำเป็นต้องถูกใช้สำหรับการแปลงผลการทำนายกลับไปเป็นข้อความ!"
},
{
text: "tokenizer ประมวลผลข้อความและให้ IDs ออกมา หลังจากนั้นโมเดลจะประมวลผล IDs เหล่านี้และให้ผลการทำนายออกมา จากนั้น tokenizer จะถูกใช้อีกครั้งในการแปลงผลการทำนายเหล่านี้กลับไปเป็นข้อความ",
explain: "ถูกต้อง! tokenizer สามารถถูกใช้สำหรับทั้งการแปลงข้อความไปเป็น IDs (tokenizing) และการแปลงผลการทำนายกลับไปเป็นข้อความ(de-tokenizing)",
correct: true
}
]}
/>
### 2. tensor ที่เป็นเอาท์พุตออกมาจากโมเดล Transformer แบบพื้นฐานมีขนาดกี่มิติ และมิติเหล่านั้นเป็นอะไรบ้าง?
<Question
choices={[
{
text: "2: ความยาวของประโยค(sequence length) และขนาดของชุดข้อมูล(batch size)",
explain: "ผิด! tensor ที่ออกมาจากโมเดลมีมิติที่สามด้วย: ขนาดของเลเยอร์ภายใน(hidden size)"
},
{
text: "2: ความยาวของประโยค(sequence length) และขนาดของเลเยอร์ภายใน(hidden size)",
explain: "ผิด! โมเดล Transformer ทุกโมเดลจะรับข้อมูลแบบชุด(batch), ถึงแม้จะมีประโยคเพียงประโยคเดียว; ขนาดของชุด(batch size) ก็เป็น 1!"
},
{
text: "3: ความยาวของประโยค(sequence length) ขนาดของชุดข้อมูล(batch size) และขนาดของเลเยอร์ภายใน(hidden size)",
explain: "ถูกต้อง!",
correct: true
}
]}
/>
### 3. ข้อใดต่อไปนี้เป็นตัวอย่างของ tokenization แบบคำย่อย(subword)?
<Question
choices={[
{
text: "WordPiece",
explain: "ถูกต้อง, ข้อนี้เป็นตัวอย่างหนึ่งของ tokenization แบบคำย่อย(subword)!",
correct: true
},
{
text: "tokenization ที่เน้นที่ตัวอักษร(Character-based)",
explain: "tokenization ที่เน้นที่ตัวอักษร(Character-based) ไม่ได้เป็นประเภทของ tokenization แบบคำย่อย(subword)"
},
{
    text: "การแบ่งคำตามช่องว่าง (whitespace) และเครื่องหมายวรรคตอน",
explain: "อันนี้เป็น tokenization ประเภทเน้นคำ(word-based)!"
},
{
text: "BPE",
explain: "ถูกต้อง, ข้อนี้เป็นตัวอย่างหนึ่งของ tokenization แบบคำย่อย(subword)!",
correct: true
},
{
text: "Unigram",
explain: "ถูกต้อง, ข้อนี้เป็นตัวอย่างหนึ่งของ tokenization แบบคำย่อย(subword)!",
correct: true
},
{
text: "ไม่มีคำตอบที่ถูกต้อง",
explain: "ผิด!"
}
]}
/>
### 4. model head คืออะไร?
<Question
choices={[
{
text: "ส่วนประกอบหนึ่งของโมเดล Transformer พื้นฐาน ที่ส่งต่อ tensor ไปยังเลเยอร์ที่ถูกต้องของมัน",
explain: "ผิด! ไม่มีส่วนประกอบแบบนั้น."
},
{
text: "มันถูกเรียกอีกอย่างหนึ่งว่าเป็นกระบวนการ self-attention, มันดัดแปลงตัวแทน(representation) ของ token หนึ่งๆ เทียบกับ tokens อื่นๆ ในประโยค",
explain: "ผิด! เลเยอร์ self-attention นั้นมี attention \"heads,\" แต่มันไม่ใช่ตัวที่เอาไว้ดัดแปลงข้อมูล(not adaptation heads)"
},
{
text: "เป็นส่วนประกอบเสริม ที่ประกอบด้วยเลเยอร์อย่างน้อยหนึ่งเลเยอร์ ใช้สำหรับแปลงผลการทำนายของ transformer ไปเป็นผลลัพท์เฉพาะสำหรับงานหนึ่งๆ",
explain: "ถูกต้อง. Adaptation heads เรียกง่ายๆ ก็คือ heads, มีหลากหลายรูปแบบ: heads สำหรับโมเดลด้านภาษา(language modeling), heads สำหรับการตอบคำถาม(question answering), heads สำหรับการจำแนกประโยค(sequence classification) ... ",
correct: true
}
]}
/>
{#if fw === 'pt'}
### 5. AutoModel คืออะไร?
<Question
choices={[
{
text: "โมเดลที่เทรนด้วยข้อมูลของคุณแบบอัตโนมัติ",
explain: "ผิด คุณสับสนกับผลิตภัณฑ์ <a href='https://huggingface.co/autonlp'>AutoNLP</a> ของเราหรือเปล่า?"
},
{
text: "เป็น object ที่ให้สถาปัตยกรรมที่ถูกต้องสำหรับ checkpoint นั้นๆออกมา",
explain: "ถูกต้อง: <code>AutoModel</code> จำเป็นต้องรู้เพียงแค่ว่า checkpoint ใดที่จะใช้ในการสร้างโมเดลและให้สถาปัตยกรรมที่ถูกต้องกลับมา",
correct: true
},
{
text: "โมเดลที่ตรวจหาภาษาที่ใช้สำหรับเป็นอินพุตของมันโดยอัตโนมัติเพื่อที่จะโหลด weights ที่ถูกต้อง",
explain: "ผิด; ในขณะที่บาง checkpoints และโมเดล นั้นสามารถประมวลผลได้หลายภาษา, แต่ไม่มีเครื่องมือแบบ built-in สำหรับเลือก checkpoint ที่ตรงกับภาษาแบบอัตโนมัติเลย คุณลองไปดูที่ <a href='https://huggingface.co/models'>Model Hub</a> เพื่อหา checkpoint ที่ดีที่สุดสำหรับงานของคุณ!"
}
]}
/>
{:else}
### 5. TFAutoModel คืออะไร?
<Question
choices={[
{
text: "โมเดลที่เทรนด้วยข้อมูลของคุณแบบอัตโนมัติ",
explain: "ผิด คุณสับสนกับผลิตภัณฑ์ <a href='https://huggingface.co/autonlp'>AutoNLP</a> ของเราหรือเปล่า?"
},
{
text: "เป็น object ที่ให้สถาปัตยกรรมที่ถูกต้องสำหรับ checkpoint นั้นๆออกมา",
explain: "ถูกต้อง: <code>AutoModel</code> จำเป็นต้องรู้เพียงแค่ว่า checkpoint ใดที่จะใช้ในการสร้างโมเดลและให้สถาปัตยกรรมที่ถูกต้องกลับมา",
correct: true
},
{
text:"โมเดลที่ตรวจหาภาษาที่ใช้สำหรับเป็นอินพุตของมันโดยอัตโนมัติเพื่อที่จะโหลด weights ที่ถูกต้อง",
explain: "ผิด; ในขณะที่บาง checkpoints และโมเดล นั้นสามารถประมวลผลได้หลายภาษา, แต่ไม่มีเครื่องมือแบบ built-in สำหรับเลือก checkpoint ที่ตรงกับภาษาแบบอัตโนมัติเลย คุณลองไปดูที่ <a href='https://huggingface.co/models'>Model Hub</a> เพื่อหา checkpoint ที่ดีที่สุดสำหรับงานของคุณ!"
}
]}
/>
{/if}
### 6. มีเทคนิคอะไรบ้างที่เราต้องคำนึงถึงเมื่อจะต้องทำการจัดประโยคที่มีความยาวแตกต่างกันเข้าเป็นชุดเดียวกัน(batching)?
<Question
choices={[
{
text: "การตัด(Truncating)",
explain: "ถูกต้อง, การตัด(truncation) เป็นวิธีที่ถูกต้องในการทำให้ประโยคนั้นมีความยาวเท่ากันเพื่อให้สามารถใส่เข้าไปแล้วชุดข้อมูลมีขนาดเป็นจตุรัส แต่มันมีเพียงแค่เทคนิคนี้เทคนิคเดียวหรือเปล่า?",
correct: true
},
{
text: "ให้ผลลัพท์ที่เป็น tensors ออกมา",
explain: "ในขณะที่ใช้เทคนิคอื่นๆ นั้น คุณสามารถได้ผลลัพท์ที่เป็น tensors แบบจตุรัสได้, ดังนั้นการให้ผลลัพท์ที่เป็นเพียง tensor นั้นไม่ได้เป็นประโยชน์เมื่อทำการรวมประโยคเข้าเป็นชุดเดียวกัน"
},
{
text: "การเติม(Padding)",
explain: "ถูกต้อง, การเติม(padding) เป็นวิธีที่ถูกต้องในการทำให้ประโยคนั้นมีความยาวเท่ากันเพื่อให้สามารถใส่เข้าไปแล้วชุดข้อมูลมีขนาดเป็นจตุรัส แต่มันมีเพียงแค่เทคนิคนี้เทคนิคเดียวหรือเปล่า?",
correct: true
},
{
text: "Attention masking",
explain: "ถูกต้องเลย! Attention masks เป็นกระบวนการที่สำคัญมากในการจัดการประโยคที่มีความยาวแตกต่างกัน แต่นั้นก็ไม่ใช่เพียงวิธีเดียว",
correct: true
}
]}
/>
### 7. อะไรคือจุดประสงค์ของการใช้ฟังก์ชัน SoftMax กับผลลัพท์ที่เป็น logits ที่ได้จากโมเดลสำหรับจำแนกประโยค (sequence classification model)?
<Question
choices={[
{
text: "มันจะช่วยลดค่าของ logits ลงทำให้มันมีความน่าเชื่อถือมากขึ้น",
explain: "ผิด, ฟังก์ชัน SoftMax ไม่ได้ส่งผลอะไรกับความน่าเชื่อถือของผลลัพท์"
},
{
text: "มันกำหนดขอบเขตค่าบนและล่าง เพื่อทำให้ค่าที่ได้สามารถเป็นที่เข้าใจได้",
explain: "ถูกต้อง! ค่าผลลัพท์ที่ได้มีขอบเขตระหว่าง 0 และ 1 แต่นี่ก็ไม่ได้เป็นเพียงเหตุผลเดียวที่เราใช้ฟังก์ชัน SoftMax",
correct: true
},
{
text: "ผลรวมของผลลัพท์เท่ากับ 1 ส่งผลให้สามารถแปลความหมายทางสถิติได้",
explain: "ถูกต้อง! แต่นี่ก็ไม่ได้เป็นเพียงเหตุผลเดียวที่เราใช้ฟังก์ชัน SoftMax",
correct: true
}
]}
/>
### 8. วิธีใดที่เป็นหัวใจหลักของ tokenizer API ส่วนใหญ่?
<Question
choices={[
{
text: "<code>encode</code>, เพราะมันสามารถเข้ารหัสข้อความไปเป็น IDs และ IDs ไปเป็นคำทำนายได้",
explain: "ผิด! ในขณะที่วิธีการ <code>encode</code> นั้นไม่มีอยู่ใน tokenizers, มันก็ไม่มีอยู่ในโมเดลเช่นเดียวกัน"
},
{
text: "การเรียก tokenizer object โดยตรง",
explain: "ถูกต้อง! วิธี <code>__call__</code> ของ tokenizer เป็นวิธีการที่ทรงพลังมากที่สามารถจัดการได้เกือบทุกอย่าง และมันก็เป็นวิธีการที่ถูกใช้ในการเอาผลการทำนายออกมาจากโมเดลด้วย",
correct: true
},
{
text: "<code>pad</code>",
explain: "ผิด! การเติม(Padding) เป็นประโยชน์มาก แต่มันก็เป็นแค่ส่วนหนึ่งของ tokenizer API"
},
{
text: "<code>tokenize</code>",
explain: "วิธี <code>tokenize</code> เป็นหนึ่งวิธีที่มีประโยชน์มากแต่มันก็ไม่ใช่หัวใจหลักของ tokenizer API"
}
]}
/>
### 9. ตัวแปร `result` ในตัวอย่างโค้ดนี้มีค่าอะไรอยู่บ้าง?
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
result = tokenizer.tokenize("Hello!")
```
<Question
choices={[
{
text: "ลิสท์ของกลุ่มตัวอักษร(A list of strings), โดยที่แต่ละ string นั้นเป็น token",
explain: "ถูกต้อง! แปลง token ไปเป็น IDs และส่งเข้าไปยังโมเดล!",
correct: true
},
{
text: "ลิสท์ของ IDs",
explain: "ไม่ถูกต้อง; นั้นเป็นหน้าที่ของ <code>__call__</code> หรือ <code>convert_tokens_to_ids</code>!"
},
{
text: "string ที่ประกอบด้วย tokens ทั้งหมด",
explain: "นั้นอาจจะไม่ใช่วิธีที่ดีที่สุด, เมื่อเป้าหมายคือการแบ่ง string ออกเป็นหลายๆ tokens "
}
]}
/>
{#if fw === 'pt'}
### 10. มีอะไรบางอย่างที่ผิดปกติกับโค้ดต่อไปนี้หรือไม่?
```py
from transformers import AutoTokenizer, AutoModel
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = AutoModel.from_pretrained("gpt2")
encoded = tokenizer("Hey!", return_tensors="pt")
result = model(**encoded)
```
<Question
choices={[
{
text: "ไม่ ดูเหมือนว่ามันจะปกติ",
explain: "โชคไม่ดีเลย, การต่อโมเดลกับ tokenizer ที่ผ่านการเทรนที่มาจาก checkpoint ที่แตกต่างกันนั้นเป็นไอเดียที่ไม่ค่อยดีเท่าไหร่ โมเดลไม่ได้ถูกเทรนมาให้สามารถเข้าใจผลลัพท์ที่ได้จาก tokenizer นี้ ดังนั้นผลลัพท์ของโมเดล(ถ้ามันสามารถรันได้!)ก็จะดูไม่เป็นเหตุเป็นผลซักเท่าไหร่"
},
{
text: "tokenizer และโมเดลควรจะมาจาก checkpoint เดียวกันเสมอ",
explain: "ถูกต้อง!",
correct: true
},
{
text: "มันเป็นสิ่งที่ดีในการเติม(pad) และ ตัด(truncate) โดยใช้ tokenizer ซึ่งทุกอินพุดก็เป็นชุดของข้อมูลอยู่แล้ว",
explain: "ถูกต้องที่ว่าทุกอินพุตของโมเดลจำเป็นต้องอยู่ในรูปแบบของชุด(batch) แต่อย่างไรก็ตาม การตัด(truncating) หรือ การเติม(padding) ประโยคนี้อาจจะไม่สมเหตุสมผล เนื่องจากมันมีแค่ประโยคเดียวและเทคนิคเหล่านั้นเป็นเทคนิคสำหรับการรวมกลุ่มประโยค(a list of sentences)เข้าด้วยกันให้เป็นชุดๆ(batch)"
}
]}
/>
{:else}
### 10. มีอะไรบางอย่างที่ผิดปกติกับโค้ดต่อไปนี้หรือไม่?
```py
from transformers import AutoTokenizer, TFAutoModel
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = TFAutoModel.from_pretrained("gpt2")
encoded = tokenizer("Hey!", return_tensors="pt")
result = model(**encoded)
```
<Question
choices={[
{
text: "ไม่ ดูเหมือนว่ามันจะปกติ",
explain: "โชคไม่ดีเลย, การต่อโมเดลกับ tokenizer ที่ผ่านการเทรนที่มาจาก checkpoint ที่แตกต่างกันนั้นเป็นไอเดียที่ไม่ค่อยดีเท่าไหร่ โมเดลไม่ได้ถูกเทรนมาให้สามารถเข้าใจผลลัพท์ที่ได้จาก tokenizer นี้ ดังนั้นผลลัพท์ของโมเดล(ถ้ามันสามารถรันได้!)ก็จะดูไม่เป็นเหตุเป็นผลซักเท่าไหร่"
},
{
text: "tokenizer และโมเดลควรจะมาจาก checkpoint เดียวกันเสมอ",
explain: "ถูกต้อง!",
correct: true
},
{
text: "มันเป็นสิ่งที่ดีในการเติม(pad) และ ตัด(truncate) โดยใช้ tokenizer ซึ่งทุกอินพุดก็เป็นชุดของข้อมูลอยู่แล้ว",
explain: "ถูกต้องที่ว่าทุกอินพุตของโมเดลจำเป็นต้องอยู่ในรูปแบบของชุด(batch) แต่อย่างไรก็ตาม การตัด(truncating) หรือ การเติม(padding) ประโยคนี้อาจจะไม่สมเหตุสมผล เนื่องจากมันมีแค่ประโยคเดียวและเทคนิคเหล่านั้นเป็นเทคนิคสำหรับการรวมกลุ่มประโยค(a list of sentences)เข้าด้วยกันให้เป็นชุดๆ(batch)"
}
]}
/>
{/if}
| course/chapters/th/chapter2/8.mdx/0 | {
"file_path": "course/chapters/th/chapter2/8.mdx",
"repo_id": "course",
"token_count": 14298
} | 145 |
# การเทรน tokenizer จาก tokenizer ที่มีอยู่แล้ว
<CourseFloatingBanner chapter={6}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/th/chapter6/section2.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/th/chapter6/section2.ipynb"},
]} />
สมมติว่าคุณต้องการจะใช้ language model ในการทำงานใดงานหนึ่ง แต่ตอนนี้ไม่มีโมเดลในภาษาที่คุณต้องการหรือโมเดลที่มีอยู่นั้นถูกเทรนจากคลังข้อมูลที่แตกต่างจากข้อมูลที่คุณต้องการจะใช้งานมาก
ในกรณีนี้คุณอาจจะจำเป็นต้องเทรน langauge model ขึ้นมาใหม่ เพื่อให้ได้โมเดลที่เหมาะกับการใช้งานของคุณ และในการเทรนนั้นคุณก็ต้องมี tokenizer ที่เหมาะกับข้อมูลของคุณ
แล้ววิธีเทรน tokenizer ขึ้นมาใหม่นั้นทำได้อย่างไร?
ใน[บทที่ 2](/course/chapter2) คุณจะเห็นว่าโมเดล Transformer ส่วนมากใช้เทคนิคการตัดคำที่ใช้หน่วยย่อยของคำ (_subword tokenization algorithm_ )
ในการตัดคำแบบนี้ ตัวตัดคำจะต้องหาหน่วยย่อยของคำ(subword)ที่เป็นประโยชน์และพบบ่อยในคลังข้อมูล ในกระบวนการหาคำย่อยนี้ tokenizer จะต้องอ่านทุกๆข้อความในคลังข้อมูล ขั้นตอนนี้เราเรียกว่าการ*เทรน*
กฎที่ใช้ในการเทรนนั้นขึ้นกับประเภทของ tokenizer ที่เราเลือกใช้ เราจะพูดถึงกับอัลกอริทึม 3 แบบที่ใช้ในการเทรน tokenizer กันในตอนท้ายของบทนี้
<Youtube id="DJimQynXZsQ"/>
<Tip warning={true}>
⚠️ การเทรน tokenize จะไม่เหมือนการกับเทรนโมเดลทั่วไป ในการเทรนโมเดลทั่วไปเราใช้ stochastic gradient descent เพื่อลดค่า loss ในทุก batch กระบวนการนี้มีความ random อยู่ในตัวของมัน (ซึ่งแปลว่า ถ้าคุณเทรนโมเดลสองครั้งแล้วอยากได้ผลลัพธ์ที่เหมือนกัน คุณจะต้องตั้งค่า seed ของการ random ให้เหมือนกันในทุกครั้งที่คุณเทรน)
ส่วนการเทรน tokenize เป็นกระบวนการทางสถิติที่พยายามจะค้นหาคำย่อยที่เหมาะสมที่สุดจากคลังข้อมูลหนึ่ง วิธีในการเลือกค้นหาคำย่อยนี้ก็มีหลากหลายวิธี
ผลลัพธ์ของการเทรนประเภทนี้จะมีความคงที่ (deterministic) ซึ่งแปลว่าคุณจะได้ผลลัพธ์เดิมทุกครั้งหลังจากการเทรน ถ้าหากคุณใช้อัลกอริทึมและข้อมูลเดิมทุกครั้ง
</Tip>
## การสร้างคลังข้อมูล (Assembling a corpus)
🤗 Transformers มี API ที่ใช้งานง่าย ที่สามารถใช้เทรน tokenizer ให้มีลักษณะเหมือน tokenizer ตัวอื่นที่เรามีอยู่แล้ว โดยการใช้ `AutoTokenizer.train_new_from_iterator()`
เพื่อให้คุณเห็นภาพชัดเจน เราจะสมมติว่า คุณต้องการเทรนโมเดล GPT-2 ตั้งแต่เริ่มแรก แต่เป็นภาษาอื่นที่ไม่ใช่ภาษาอังกฤษ
สิ่งที่แรกที่คุณต้องทำคือรวบรวมข้อความในภาษานั้นเพื่อสร้างชุดข้อมูลสำหรับการเทรน
ในตัวอย่างต่อไปนี้ เพื่อให้ผู้อ่านทุกคนเข้าใจได้ง่าย เราจะไม่ใช้ภาษารัสเซียหรือภาษาจีนเป็นตัวอย่าง แต่จะใช้ภาษาหนึ่งที่เป็นภาษาอังกฤษแบบพิเศษ นั่นคือ Python code
เราจะใช้ [🤗 Datasets](https://github.com/huggingface/datasets) library เพื่อช่วยสร้างคลังข้อมูล
และใช้ฟังก์ชัน `load_dataset()` เพื่อดาวโหลดและ cache ชุดข้อมูล [CodeSearchNet](https://huggingface.co/datasets/code_search_net)
ชุดข้อมูลชุดนี้ถูกสร้างขึ้นมาเพื่อใช้ในการแข่งขัน [CodeSearchNet challenge](https://wandb.ai/github/CodeSearchNet/benchmark)
และประกอบไปด้วยโค้ดของฟังก์ชันจาก open source libraries จาก GitHub ในหลายๆภาษา เราจะดาวโหลดเฉพาะโค้ดที่เป็น Python
```py
from datasets import load_dataset
# This can take a few minutes to load, so grab a coffee or tea while you wait!
raw_datasets = load_dataset("code_search_net", "python")
```
คุณสามารถเช็คดูข้อมูลส่วนที่ใช้เทรนได้โดยรันโค้ดข้างล่างนี้ เพื่อจะได้ดูว่าในชุดข้อมูลมีคอลัมน์อะไรบ้าง
```py
raw_datasets["train"]
```
```python out
Dataset({
features: ['repository_name', 'func_path_in_repository', 'func_name', 'whole_func_string', 'language',
'func_code_string', 'func_code_tokens', 'func_documentation_string', 'func_documentation_tokens', 'split_name',
'func_code_url'
],
num_rows: 412178
})
```
เราจะเห็นว่าในชุดข้อมูลนี้ ส่วนที่เป็น docstrings จะถูกแยก ออกจากส่วนที่เป็น code และนอกจากนั้น แต่ละส่วนยังมีอีกคอลัมน์เพื่อเก็บข้อความที่ถูกตัดออกเป็น token แล้วอีกด้วย
เราจะใช้แค่คอลัมน์ `whole_func_string` ในการเทรน tokenizer ของเรา
คุณสามารถสุ่มตัวอย่างของข้อมูลในแต่ละคอลัมน์มาดูได้ดังนี้
```py
print(raw_datasets["train"][123456]["whole_func_string"])
```
คำสั่งข้างบนจะ print ผลลัพธ์ข้างล่างนี้ :
```out
def handle_simple_responses(
self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message.
"""
return self._accept_responses('OKAY', info_cb, timeout_ms=timeout_ms)
```
หลังจากนั้น เราก็จะต้องแปลงชุดข้อมูลนี้เป็น _iterator_ ของ list ของข้อความ (_iterator_ of lists of texts) ตัวอย่างเช่น list ของ list ของข้อความ (list of list of texts)
การใช้ list ของข้อความแบบนี้ จะทำให้การเทรนเร็วขึ้น เพราะว่าการเทรนเป็น batch จะเร็วกว่าการประมวลผลครั้งละหนึ่งข้อความ และสาเหตุที่ input ควรจะเป็น iterator ก็เพื่อป้องกันไม่ให้ Python อ่านข้อความทั้งหมดเข้าไปเก็บใน memory ของคอมพิวเตอร์ภายในครั้งเดียว
ถ้าชุดข้อมูลของคุณนั้นใหญ่มาก คุณอาจจะลองใช้ 🤗 Datasets เพื่อช่วยจัดการชุดข้อมูล เพราะมันจะไม่อ่านข้อมูลทั้งหมดเข้าไปเก็บใน RAM แต่บันทึกข้อมูลใน disk แทน
โค้ดข้างล่างนี้จะสร้าง list ของ list ของ 1,000 ข้อความ (list of lists of 1,000 texts) และจะโหลดข้อมูล input ทั้งหมดไปเก็บใน memory:
```py
# Don't uncomment the following line unless your dataset is small!
# training_corpus = [raw_datasets["train"][i: i + 1000]["whole_func_string"] for i in range(0, len(raw_datasets["train"]), 1000)]
```
ถ้าหากคุณเปลี่ยนมาใช้ Python generator แทน ก็จะป้องกันไม่ให้ Python โหลดข้อมูลทั้งหมดเข้าไปใน memory ถ้าไม่จำเป็น
วิธีการสร้าง generator ก็ง่ายๆเพียงแค่ แทนที่วงเล็บเหลี่ยม `[` ด้วยเว็บเล็บธรรมดา `(` ในโค้ดข้างบน:
```py
training_corpus = (
raw_datasets["train"][i : i + 1000]["whole_func_string"]
for i in range(0, len(raw_datasets["train"]), 1000)
)
```
โค้ดข้างบนนี้ จะไม่โหลดข้อความจาก `raw_datasets` ทั้งหมดเข้าไปใน memory แต่จะสร้าง iterator ซึ่งเป็น Python object ที่เป็นเสมือนตัวเก็บข้อมูลชั่วคราว
การจะเรียกใช้ข้อมูลในนั้น ทำได้โดยใช้ `for` loop ข้อความใน iterator จะถูกโหลดเข้าไปใน memory ก็ต่อเมื่อคุณจะใช้งานมันเท่านั้น(ซึ่งก็คือ เวลาที่ `for` loop วนไปถึง item นั้น) ในตัวอย่างของเรา ในแต่ละ loop จะมีเพียงแค่ 1000 ข้อความเท่านั้นที่จะถูกโหลดมาเก็บไว้ใน memory การทำแบบนี้จะช่วยไม่ให้ memory ถูกใช้งานมากเกินไป หากคุณมีชุดข้อมูลที่ใหญ่มาก
แต่ข้อเสียของ generator ก็คือเราสามารถใช้มันได้แค่ครั้งเดียว ดูตัวอย่างจากโค้ดข้างล่างนี้
```py
gen = (i for i in range(10))
print(list(gen))
print(list(gen))
```
เราจะเห็นว่าโค้ดนี้ print ผลลัพธ์แค่ครั้งแรก ส่วนในการสั่ง print ครั้งที่สองเราได้เพียง list เปล่ากลับมา:
```python out
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[]
```
เพื่อแก้ปัญหานี้ เราจะสร้างฟังก์ชันที่ผลิต Python generator เพื่อเอาไว้เก็บชุดข้อมูลแทน:
```py
def get_training_corpus():
return (
raw_datasets["train"][i : i + 1000]["whole_func_string"]
for i in range(0, len(raw_datasets["train"]), 1000)
)
training_corpus = get_training_corpus()
```
การสร้าง generator ทำได้โดย ใช้ `for` loop และ `yield` statement:
```py
def get_training_corpus():
dataset = raw_datasets["train"]
for start_idx in range(0, len(dataset), 1000):
samples = dataset[start_idx : start_idx + 1000]
yield samples["whole_func_string"]
```
ฟังก์ชันนี้จะสร้าง generator แบบเดียวกับวิธีการข้างบน แต่ช่วยให้คุณสามารถเขียน logic ที่ซับซ้อนได้มากกว่าการใช้เพียง list comprehension
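ตัวอย่างข้างล่างนี้เป็นเพียงโครงร่างสมมุติ (ไม่ได้อยู่ในบทเรียนต้นฉบับ) เพื่อแสดงว่าเราสามารถใส่ logic เพิ่มเติมเข้าไปใน generator ได้ เช่น การกรองข้อความที่สั้นเกินไปออกก่อนส่งให้ขั้นตอนการเทรน:
```py
def get_training_corpus_filtered(min_length=10):
    # ตัวอย่างสมมุติ: ข้ามข้อความที่สั้นกว่า min_length ตัวอักษร
    dataset = raw_datasets["train"]
    for start_idx in range(0, len(dataset), 1000):
        samples = dataset[start_idx : start_idx + 1000]
        texts = [t for t in samples["whole_func_string"] if len(t) >= min_length]
        if texts:
            yield texts
```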
## การเทรน tokenizer
หลังจากเราก็มี iterator ที่แบ่งชุดข้อมูลเป็น batch แล้ว เราก็พร้อมแล้วที่จะเทรน tokenizer สิ่งแรกที่คุณต้องทำคือโหลด tokenizer ที่คุณต้องการจะใช้คู่กับโมเดลหลัก(ในตัวอย่างนี้โมเดลหลักของเราคือ GPT-2)
```py
from transformers import AutoTokenizer
old_tokenizer = AutoTokenizer.from_pretrained("gpt2")
```
ถึงแม้ว่าเป้าหมายของเราคือการเทรน tokenizer ใหม่ เราจะเริ่มต้นด้วยการโหลด tokenizer ที่ถูกเทรนมาแล้ว เพื่อที่เราจะได้ไม่ต้องเริ่มกระบวนการทั้งหมดตั้งแต่แรก
ข้อดีของการทำแบบนี้ก็คือ คุณไม่ต้องเสียเวลาตั้งค่าต่างๆ เช่น ประเภทอัลกอริทึมของ tokenizer หรือ token พิเศษต่างๆ tokenizer ตัวใหม่ของเราจะมีโครงสร้างเหมือนกับตัวที่ใช้ใน GPT-2 สิ่งเดียวที่แตกต่างคือชุดคำศัพท์(vocabulary) ซึ่งจะเปลี่ยนไปตามชุดข้อมูลใหม่ที่เราจะใช้
ก่อนอื่นมาดูกันว่า tokenizer ที่เราเพิ่งโหลดมา จะแบ่งข้อความตัวอย่างข้างล่างอย่างไร :
```py
example = '''def add_numbers(a, b):
"""Add the two numbers `a` and `b`."""
return a + b'''
tokens = old_tokenizer.tokenize(example)
tokens
```
```python out
['def', 'Ġadd', '_', 'n', 'umbers', '(', 'a', ',', 'Ġb', '):', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġ"""', 'Add', 'Ġthe', 'Ġtwo',
'Ġnumbers', 'Ġ`', 'a', '`', 'Ġand', 'Ġ`', 'b', '`', '."', '""', 'Ċ', 'Ġ', 'Ġ', 'Ġ', 'Ġreturn', 'Ġa', 'Ġ+', 'Ġb']
```
tokenizer นี้มีการใช้สัญลักษณ์พิเศษ เช่น `Ġ` ซึ่งเอาไว้แทนช่องว่าง (space) และ `Ċ` ซึ่งแทนการเริ่มบรรทัดใหม่ (newline)
เราจะเห็นว่า ผลลัพธ์ของการตัดคำไม่ค่อยจะดีนัก เพราะว่าช่องว่างที่อยู่ต่อกันนั้น ถูกแบ่งออกเป็นอย่างละ token ซึ่งจริงๆแล้วการแบ่งที่ดีกว่านี้คือ ช่องว่างที่อยู่ติดกันควรจะถูกรวมให้เป็น token เดียว (เพราะว่าการพิมพ์ช่องว่าง 4 หรือ 8 ครั้ง เป็นสิ่งที่พบได้ทั่วไปในการเขียนโค้ด)
นอกจากนั้น tokenizer นี้ยังแบ่งชื่อฟังก์ชันได้ไม่ดีเท่าไหร่ เหมือนกับว่ามันไม่คุ้นเคยกับสัญลักษณ์ `_` ทำให้ชื่อฟังก์ชันถูกแยกออกเป็นสี่ส่วน
เรามาเริ่มเทรน tokenizer ตัวใหม่กัน แล้วดูว่า เราจะแก้ปัญหานี้ได้หรือเปล่า เราจะเริ่มจากการใช้ Python method ชื่อว่า `train_new_from_iterator()`:
```py
tokenizer = old_tokenizer.train_new_from_iterator(training_corpus, 52000)
```
เวลารันคำสั่งนี้ โปรแกรมอาจจะใช้เวลาสักพัก ถ้าคุณใช้ชุดข้อมูลที่ใหญ่มาก แต่สำหรับชุดข้อมูลตัวอย่างของเราที่มีขนาด 1.6 GB การประมวลผลนั้นค่อนข้างเร็ว (ใช้เวลาทั้งหมด 1 นาที 16 วินาที บนซีพียู AMD Ryzen 9 3900X ซึ่งมี 12 cores)
สิ่งหนึ่งที่คุณควรรู้คือ `AutoTokenizer.train_new_from_iterator()` นั้น ใช้งานได้แค่ในกรณีที่ตัวตัดคำเป็นแบบเร็ว
คุณจะได้เห็นในบทต่อไปว่า 🤗 Transformers library มี tokenizer สองประเภท ประเภทแรกคือตัวที่เขียนด้วย Python ล้วน และประเภทที่สอง(แบบเร็ว)ที่สร้างจาก 🤗 Tokenizers library ซึ่งใช้ภาษา [Rust](https://www.rust-lang.org) ในการเขียน
แม้ว่า Python จะเป็นภาษาที่ได้รับความนิยมมากที่สุดในงานด้าน data science และ deep learning แต่ถ้าเราต้องการประมวลผลข้อมูลให้รวดเร็วมากขึ้น โดยใช้การประมวลผลแบบ parallel (หมายถึง ประมวลผลหลายๆงานพร้อมๆกัน) เราจำเป็นต้องเขียนโปรแกรมด้วยภาษาอื่น
ตัวอย่างเช่น การคูณเมทริกซ์ ซึ่งถือเป็นการคำนวณหลักในการประมวลผลของโมเดลประเภท neural network โค้ดส่วนนี้จะถูกเขียนด้วยภาษา CUDA ซึ่งเป็น C library ที่ถูกพัฒนาให้เหมาะกับการใช้งานร่วมกับ GPU
หากเราเขียนโปรแกรมสำหรับเทรน tokenizer ด้วย Python อย่างเดียว จะทำให้การคำนวณช้ามาก นี่คือเหตุผลที่ Huggingface สร้าง 🤗 Tokenizers library ขึ้นมา
แต่ไม่ต้องกังวลกับส่วนนี้ เพราะคุณไม่จำเป็นต้องรู้ภาษา Rust เพื่อจะใช้งานตัวตัดคำแบบเร็วนี้ เหมือนกับที่คุณไม่จำเป็นต้องรู้ภาษา CUDA เพื่อจะรันโมเดลบน GPU
🤗 Tokenizers library มี Python bindings สำหรับ method ที่ต้องเรียกใช้โค้ดจากภาษา Rust
ตัวอย่างเช่น โค้ดส่วนที่ทำให้การเทรน tokenizer เป็นไปแบบ parallel หรือตอนที่เรารัน tokenizer กับ
input แบบ batch เหมือนที่เราทำใน[บทที่ 3](/course/chapter3)
โมเดล Transformer ส่วนมากรองรับการใช้งานร่วมกับตัวตัดคำแบบเร็ว (แต่มีกรณียกเว้น คุณสามารถเช็คดูได้ที่[นี่](https://huggingface.co/transformers/#supported-frameworks))
สำหรับโมเดลที่รองรับการตัดคำแบบเร็ว `AutoTokenizer` API จะโหลดตัวตัดคำแบบเร็วเป็นค่าเริ่มต้นเสมอ
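คุณสามารถตรวจสอบได้ว่า tokenizer ที่ใช้อยู่เป็นแบบเร็วหรือไม่ ผ่าน attribute ชื่อ `is_fast` (โค้ดด้านล่างนี้เป็นเพียงตัวอย่างประกอบ):
```py
# เช็คว่า tokenizer ทั้งสองตัวเป็นตัวตัดคำแบบเร็วหรือไม่ (ควรได้ค่า True ทั้งคู่)
print(old_tokenizer.is_fast)
print(tokenizer.is_fast)
```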
ใน section ถัดไปเราจะเรียนเกี่ยวกับ feature พิเศษต่างๆของตัวตัดคำแบบเร็ว ซึ่งจะมีประโยชน์ในงานประเภท token classification หรือ question answering
ก่อนที่เราจะไปดูรายละเอียดกัน เรามาดูประสิทธิภาพของ tokenizer ที่เพิ่งเทรนเสร็จแล้วของเรากันดีกว่า เราจะลองใส่ข้อความที่เราใช้ในตัวอย่างด้านบนให้กับ tokenizer ของเราดู
```py
tokens = tokenizer.tokenize(example)
tokens
```
```python out
['def', 'Ġadd', '_', 'numbers', '(', 'a', ',', 'Ġb', '):', 'ĊĠĠĠ', 'Ġ"""', 'Add', 'Ġthe', 'Ġtwo', 'Ġnumbers', 'Ġ`',
'a', '`', 'Ġand', 'Ġ`', 'b', '`."""', 'ĊĠĠĠ', 'Ġreturn', 'Ġa', 'Ġ+', 'Ġb']
```
ในผลลัพธ์ของการตัดคำ คุณจะยังเห็นสัญลักษณ์พิเศษ `Ġ` และ `Ċ` เหมือนในตัวอย่างก่อนหน้า แต่คุณจะสังเกตว่า ตอนนี้ tokenizer ของเรานั้นได้เรียนรู้และเห็นว่า token บางตัวนั้น โดดเด่นกว่าตัวอื่นๆในชุดข้อมูล
ตัวอย่างเช่น token `ĊĠĠĠ` แสดงถึงการย่อหน้า(indentation) และ `Ġ"""` แสดงถึงเครื่องหมายคำพูดสามตัว ที่โปรแกรมเมอร์ใช้เวลาจะเริ่มเขียน docstring
ตัวตัดคำใหม่นี้ ยังแบ่งชื่อฟังก์ชันได้อย่างถูกต้องอีกด้วย โดยแบ่งที่ `_`
การแบ่งคำแบบนี้ ทำให้สัญลักษณ์หรือตัวอักษรต่างๆถูกรวบให้กระทัดรัดขึ้น หากเทียบกับ tokenizer เก่าที่เทรนจากข้อความภาษาอังกฤษปกติ เราจะเห็นว่า ถ้าเราใช้ทั้งสอง tokenizer เพื่อตัดข้อความ input เดียวกัน tokenizer เก่าจะให้ผลลัพธ์ที่ยาวกว่า tokenizer ตัวใหม่
```py
print(len(tokens))
print(len(old_tokenizer.tokenize(example)))
```
```python out
27
36
```
มาดูอีกตัวอย่างกัน :
```python
example = """class LinearLayer():
def __init__(self, input_size, output_size):
self.weight = torch.randn(input_size, output_size)
self.bias = torch.zeros(output_size)
def __call__(self, x):
return x @ self.weights + self.bias
"""
tokenizer.tokenize(example)
```
```python out
['class', 'ĠLinear', 'Layer', '():', 'ĊĠĠĠ', 'Ġdef', 'Ġ__', 'init', '__(', 'self', ',', 'Ġinput', '_', 'size', ',',
'Ġoutput', '_', 'size', '):', 'ĊĠĠĠĠĠĠĠ', 'Ġself', '.', 'weight', 'Ġ=', 'Ġtorch', '.', 'randn', '(', 'input', '_',
'size', ',', 'Ġoutput', '_', 'size', ')', 'ĊĠĠĠĠĠĠĠ', 'Ġself', '.', 'bias', 'Ġ=', 'Ġtorch', '.', 'zeros', '(',
'output', '_', 'size', ')', 'ĊĊĠĠĠ', 'Ġdef', 'Ġ__', 'call', '__(', 'self', ',', 'Ġx', '):', 'ĊĠĠĠĠĠĠĠ',
'Ġreturn', 'Ġx', 'Ġ@', 'Ġself', '.', 'weights', 'Ġ+', 'Ġself', '.', 'bias', 'ĊĠĠĠĠ']
```
ในตัวอย่างนี้ นอกจากเราจะเห็น token ที่แสดงถึงย่อหน้าแล้ว เรายังเห็น token ของ double indentation ซึ่งคือ `ĊĠĠĠĠĠĠĠ` ส่วนคำที่มีความหมายพิเศษใน Python เช่น `class`, `init`, `call`, `self` และ `return` ก็ถูกแบ่งให้เป็นอย่างละ token อย่างถูกต้อง
นอกจากนั้น เราจะยังเห็นด้วยว่า tokenizer จะตัดแบ่งข้อความเวลาที่มันเห็นสัญลักษณ์ `_` และ `.` และยังแบ่งข้อความที่เป็น camel-cased ได้อย่างถูกต้อง เช่น `LinearLayer` ถูกแยกออกเป็น `["ĠLinear", "Layer"]`
## การบันทึก tokenizer
เพื่อที่เราจะสามารถใช้งาน tokenizer ที่เราเทรนเมื่อซักครู่นี้ได้อีกในครั้งหน้า เราจำเป็นจะต้องเก็บบันทึกมันไว้ ในการเซฟเราจะใช้ method ชื่อ `save_pretrained()`
```py
tokenizer.save_pretrained("code-search-net-tokenizer")
```
คำสั่งนี้จะสร้างแฟ้มงาน (folder) ขึ้นมาใหม่ ชื่อว่า *code-search-net-tokenizer* ซึ่งเอาไว้บันทึกข้อมูลต่างๆของ tokenizer ที่จำเป็นในการเรียกใช้งานอีกครั้ง
ถ้าคุณต้องการจะแชร์ tokenizer นี้กับเพื่อนร่วมงานหรือเพื่อนของคุณ คุณสามารถอัพโหลดมันไปที่ Hub ของ Huggingface ได้ โดยคุณจะต้อง login เข้าบัญชีก่อน
ถ้าคุณทำงานใน notebook (เช่น Jupyter notebook) คุณสามารถใช้ function ข้างล่างนี้ได้เพื่อความสะดวก
```python
from huggingface_hub import notebook_login
notebook_login()
```
หลังจากคุณรันโค้ดข้างบน คุณจะเห็น widget ให้ล็อกอินเข้าบัญชี Hugging Face
แต่หากคุณไม่ได้ใช้ notebook ให้พิมพ์คำสั่งข้างล่างนี้ใน terminal
```bash
huggingface-cli login
```
หลังจากล็อกอินแล้ว คุณจะสามารถ push tokenizer ของคุณไปที่ Hub ได้ โดยใช้คำสั่งข้างล่างนี้
```py
tokenizer.push_to_hub("code-search-net-tokenizer")
```
คำสั่งนี้จะสร้าง repository ใหม่ในชื่อ `code-search-net-tokenizer` ใน namespace ของคุณ ซึ่ง repository นี้ก็จะเก็บไฟล์เกี่ยวกับ tokenizer ของคุณไว้ หลังจากนั้น คุณก็จะสามารถดาวน์โหลด tokenizer นี้ได้ ด้วยการใช้ `from_pretrained()`
```py
# Replace "huggingface-course" below with your actual namespace to use your own tokenizer
tokenizer = AutoTokenizer.from_pretrained("huggingface-course/code-search-net-tokenizer")
```
มาถึงตอนนี้คุณก็พร้อมแล้วที่จะเทรน และ fine-tune language model สำหรับงานที่คุณต้องการ เราจะเรียนเรื่องกันนี้ใน[บทที่ 7](/course/chapter7) แต่ในบทนี้ เราจะเรียนเกี่ยวกับ fast tokenizer ให้ละเอียดมากขึ้นและมาดูกันว่า เวลาคุณรัน `train_new_from_iterator()` มีการคำนวณอะไรเกิดขึ้นบ้าง | course/chapters/th/chapter6/2.mdx/0 | {
"file_path": "course/chapters/th/chapter6/2.mdx",
"repo_id": "course",
"token_count": 18762
} | 146 |
<FrameworkSwitchCourse {fw} />
# Giriş
<CourseFloatingBanner
chapter={3}
classNames="absolute z-10 right-0 top-0"
/>
[İkinci bölümde](/course/chapter2) tokenizer ve pretrained modelleri kullanarak nasıl tahmin yapabileceğimizi öğrendik. Fakat, kendi veri setiniz için, pretrained bir modeli nasıl kullanacaksınız ? İşte bu bölümde bunu öğreneceksiniz! Öğrenecekleriniz :
{#if fw === 'pt'}
* Hub'dan nasıl büyük bir veri seti hazırlanır
* Trainer API ile nasıl model fine-tune edilir
* Özelleştirilmiş training döngüsü nasıl yazılır
* Bu özel training döngüsünü herhangi bir dağıtılmış(distributed) kurulumda kolayca çalıştırmak için 🤗 Accelerate kütüphanesinden nasıl yararlanılır
{:else}
* Hub'dan nasıl büyük bir veri seti hazırlanır
* Keras ile nasıl model fine-tune edilir
* Keras ile tahminler nasıl elde edilir
* Özel metrikler nasıl kullanılır
{/if}
Hugging Face Hub'a eğittiğiniz model ağırlıklarını yüklemek için huggingface.co hesabına ihtiyacınız var. [Hesap oluşturun](https://huggingface.co/join)
"file_path": "course/chapters/tr/chapter3/1.mdx",
"repo_id": "course",
"token_count": 483
} | 147 |
<FrameworkSwitchCourse {fw} />
# Tokenizers
{#if fw === 'pt'}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter2/section4_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter2/section4_pt.ipynb"},
]} />
{:else}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter2/section4_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter2/section4_tf.ipynb"},
]} />
{/if}
<Youtube id="VFp38yj8h3A"/>
Tokenizer là một trong những thành phần cốt lõi của pipeline NLP. Chúng phục vụ một mục đích: dịch văn bản thành dữ liệu có thể được xử lý bởi mô hình. Mô hình chỉ có thể xử lý dạng số, do đó, các tokenizer cần phải chuyển đổi đầu vào văn bản của chúng ta thành dữ liệu số. Trong phần này, chúng ta sẽ khám phá chính xác những gì xảy ra trong đường dẫn mã hóa.
Trong các tác vụ NLP, dữ liệu thường được xử lý là văn bản thô. Đây là một ví dụ về văn bản như vậy:
```
Jim Henson was a puppeteer
```
Tuy nhiên, các mô hình chỉ có thể xử lý số, vì vậy chúng ta cần tìm cách chuyển văn bản thô thành số. Đó là những gì mà tokenizer làm, và có rất nhiều cách để thực hiện điều này. Mục tiêu đề ra là tìm ra cách biểu diễn có ý nghĩa nhất - nghĩa là cái có ý nghĩa nhất đối với mô hình - và, nếu có thể, là cách biểu diễn nhỏ nhất.
Hãy cùng xem một số ví dụ về thuật toán tokenize và cố gắng trả lời một số câu hỏi bạn có thể có về tokenize.
## Dựa trên từ
<Youtube id="nhJxYji1aho"/>
Loại tokenizer đầu tiên ta nghĩ đến đó là _dựa trên từ vựng_. Nó thường rất dễ thiết lập và sử dụng chỉ với một số quy tắc và nó thường mang lại kết quả tốt. Ví dụ: trong hình ảnh bên dưới, mục tiêu là tách văn bản thô thành các từ và tìm biểu diễn số cho mỗi từ:
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/word_based_tokenization.svg" alt="An example of word-based tokenization."/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/word_based_tokenization-dark.svg" alt="An example of word-based tokenization."/>
</div>
Có nhiều cách khác nhau để tách văn bản. Ví dụ: chúng ta có thể sử dụng khoảng trắng để tokenize văn bản thành các từ bằng cách áp dụng hàm `split()` của Python:
```py
tokenized_text = "Jim Henson was a puppeteer".split()
print(tokenized_text)
```
```python out
['Jim', 'Henson', 'was', 'a', 'puppeteer']
```
Ngoài ra còn có các biến thể của tokenize mức từ với các quy tắc bổ sung cho dấu câu. Với loại tokenizer này, chúng ta có thể đúc kết với một bộ "từ vựng" khá lớn, trong đó từ vựng được xác định bằng tổng số token độc lập mà chúng ta có trong corpus (kho ngữ liệu) của mình.
Mỗi từ được gán một ID, bắt đầu từ 0 và tăng dần theo kích thước của bộ từ vựng. Mô hình sử dụng các ID này để xác định từng từ.
Nếu chúng ta muốn bao phủ hoàn toàn một ngôn ngữ bằng tokenize mức từ, chúng ta sẽ cần phải có một chỉ số nhận dạng cho mỗi từ trong ngôn ngữ, điều này sẽ tạo ra một lượng lớn token. Ví dụ: có hơn 500,000 từ trong tiếng Anh, vì vậy để xây dựng bản đồ nối mỗi từ đến một ID đầu vào, chúng ta cần theo dõi ngần đó ID. Hơn nữa, các từ như "dog" được biểu diễn khác với các từ như "dogs", và ban đầu mô hình sẽ không có cách nào để biết rằng "dog" (chó) và "dogs" là tương tự nhau: nó sẽ xác định hai từ này không liên quan. Điều này cũng áp dụng cho các từ tương tự khác, như "run" (chạy) và "running", mà ban đầu mô hình sẽ không thấy là tương tự.
Cuối cùng, chúng ta cần một token tùy chỉnh để đại diện cho các từ không có trong vốn từ vựng của chúng ta. Token này được gọi là token "không xác định", thường được biểu thị là "[UNK]" hoặc "<unk>". Nói chung, đó là một dấu hiệu xấu nếu bạn thấy trình tokenize đang tạo ra rất nhiều token này, vì nó không thể truy xuất một biểu diễn hợp lý của một từ và bạn đang mất thông tin trong suốt quá trình. Mục tiêu khi tạo từ vựng là làm sao cho trình tokenize mã hóa càng ít từ thành token không xác định càng tốt.
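Dưới đây là một phác thảo nhỏ chỉ mang tính minh hoạ (không phải cách 🤗 Transformers triển khai thực tế) cho thấy một bộ từ vựng mức từ ánh xạ từ sang ID và xử lý từ không xác định như thế nào:
```py
# Phác thảo minh hoạ: bộ từ vựng mức từ với token không xác định "[UNK]"
vocab = {"[UNK]": 0}
for word in "Jim Henson was a puppeteer".split():
    vocab.setdefault(word, len(vocab))

def word_to_id(word):
    # Từ nào không có trong từ vựng sẽ nhận ID của "[UNK]"
    return vocab.get(word, vocab["[UNK]"])

print(word_to_id("Henson"))  # 2
print(word_to_id("dog"))     # 0, tức là "[UNK]"
```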
Một cách để giảm số lượng mã thông báo không xác định là đi sâu hơn xuống một cấp, sử dụng tokenize _mức kí tự_.
## Dựa trên kí tự
<Youtube id="ssLq_EK2jLE"/>
Tokenizer _dựa trên ký tự_ chia văn bản thành các ký tự, thay vì các từ. Cách làm này có hai lợi ích chính:

- Vốn từ vựng ít hơn nhiều.
- Có ít token ngoài bộ từ vựng (không xác định) hơn nhiều, vì mọi từ đều có thể được xây dựng từ các ký tự.
Nhưng ở đây cũng có một số câu hỏi nảy sinh liên quan đến dấu cách và các dấu câu:
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/character_based_tokenization.svg" alt="An example of character-based tokenization."/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/character_based_tokenization-dark.svg" alt="An example of character-based tokenization."/>
</div>
Cách tiếp cận này cũng không hoàn hảo. Vì biểu diễn bây giờ dựa trên các ký tự chứ không phải từ, người ta có thể lập luận rằng, theo trực giác, nó ít ý nghĩa hơn: mỗi ký tự không có nhiều ý nghĩa riêng so với trường hợp của các từ. Tuy nhiên, điều này lại khác nhau tùy theo ngôn ngữ; trong tiếng Trung, chẳng hạn, mỗi ký tự mang nhiều thông tin hơn một ký tự trong ngôn ngữ Latinh.
Một điều khác cần xem xét là chúng ta sẽ có một lượng rất lớn token sẽ được xử lý bởi mô hình của chúng ta: trong khi một từ chỉ là một token duy nhất khi tokenize dựa trên từ, nó có thể dễ dàng chuyển thành 10 token trở lên khi chuyển đổi thành các ký tự.
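Một ví dụ nhỏ (chỉ mang tính minh hoạ) cho thấy một từ duy nhất trở thành bao nhiêu token khi tách theo ký tự:
```py
# Một từ duy nhất trở thành 9 token khi tokenize mức ký tự
print(list("puppeteer"))
```
```python out
['p', 'u', 'p', 'p', 'e', 't', 'e', 'e', 'r']
```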
Để tận dụng tối đa cả hai, chúng ta có thể sử dụng kỹ thuật thứ ba kết hợp hai cách tiếp cận: _tokenize theo từ phụ_.
## Tokenize theo từ phụ
<Youtube id="zHvTiHr506c"/>
Các thuật toán token theo từ phụ dựa trên nguyên tắc rằng các từ được sử dụng thường xuyên không được chia thành các từ phụ nhỏ hơn, nhưng các từ hiếm phải được phân tách thành các từ phụ có ý nghĩa.
Ví dụ: "annoyingly" (khó chịu) có thể được coi là một từ hiếm và có thể được chuyển thành "annoying" và "ly". Cả hai đều có khả năng xuất hiện thường xuyên hơn dưới dạng các từ phụ độc lập, đồng thời nghĩa của "annoying" được giữ nguyên bởi nghĩa kết hợp của "annoying" và "ly".
Dưới đây là một ví dụ cho thấy cách một thuật toán tokenize theo từ phụ sẽ tokenize chuỗi "Let's do tokenization!" (Hãy thực hiện tokenize!):
<div class="flex justify-center">
<img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/bpe_subword.svg" alt="A subword tokenization algorithm."/>
<img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/bpe_subword-dark.svg" alt="A subword tokenization algorithm."/>
</div>
Những từ phụ này cung cấp rất nhiều ý nghĩa về mặt ngữ nghĩa: ví dụ: trong ví dụ ở trên "tokenization" được chia thành "token" và "ization", hai token đều có ý nghĩa về mặt ngữ nghĩa đồng thời tiết kiệm không gian (chỉ cần hai token để biểu thị một từ dài). Điều này cho phép chúng ta có thể bao quát tương đối tốt với các từ vựng nhỏ và gần như không có token nào không xác định.
Cách tiếp cận này đặc biệt hữu ích trong các ngôn ngữ tổng hợp như tiếng Thổ Nhĩ Kỳ, nơi bạn có thể tạo (gần như) các từ phức dài tùy ý bằng cách xâu chuỗi các từ phụ lại với nhau.
### Và hơn thế nữa!
Không có gì đáng ngạc nhiên, có rất nhiều kỹ thuật khác, có thể kể đến:
- Byte-level BPE (BPE cấp byte), như được sử dụng trong GPT-2
- WordPiece, như được sử dụng trong BERT
- SentencePiece hoặc Unigram, như được sử dụng trong một số mô hình đa ngôn ngữ
Bây giờ, bạn đã có đủ kiến thức về cách thức hoạt động của tokenize để bắt đầu với API.
## Tải và lưu
Việc tải và lưu tokenizer cũng đơn giản như với các mô hình. Trên thực tế, nó dựa trên hai phương thức giống nhau: `from_pretrained()` và `save_pretrained()`. Các phương thức này sẽ tải hoặc lưu thuật toán được sử dụng bởi tokenizer (hơi giống với _kiến trúc_ của mô hình) cũng như từ vựng của nó (hơi giống với _trọng số_ của mô hình).
Việc tải BERT tokenizer được huấn luyện với cùng một checkpoint với BERT được thực hiện giống như cách tải mô hình, ngoại trừ việc chúng ta sử dụng lớp `BertTokenizer`:
```py
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
```
{#if fw === 'pt'}
Tương tự `AutoModel`, lớp `AutoTokenizer` sẽ lấy lớp tokenizer thích hợp trong thư viện dựa trên tên checkpoint và có thể được sử dụng trực tiếp với bất kỳ checkpoint nào:
{:else}
Tương tự `TFAutoModel`, lớp `AutoTokenizer` sẽ lấy lớp tokenizer thích hợp trong thư viện dựa trên tên checkpoint và có thể được sử dụng trực tiếp với bất kỳ checkpoint nào:
{/if}
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
```
Giờ chúng ta có thể sử dụng tokenizer như trong đoạn dưới đây:
```python
tokenizer("Using a Transformer network is simple")
```
```python out
{'input_ids': [101, 7993, 170, 11303, 1200, 2443, 1110, 3014, 102],
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0],
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1]}
```
Lưu một tokenizer giống như khi lưu một mô hình vậy:
```py
tokenizer.save_pretrained("directory_on_my_computer")
```
Chúng ta sẽ trao đổi thêm về `token_type_ids` trong [Chương 3](/course/chapter3), và chúng ta sẽ giải thích cơ chế của từ khoá `attention_mask` sau đó. Đầu tiênm hãy cùng xem cách `input_ids` được tạo ra. Để làm điều này, chúng ta sẽ cần xem xét các phương thức trung gian của tokenizer.
## Mã hoá
<Youtube id="Yffk5aydLzg"/>
Dịch văn bản sang số được gọi là _encoding_ hay _mã hoá_. Việc mã hóa được thực hiện theo quy trình gồm hai bước: tokenize, tiếp theo là chuyển đổi sang ID đầu vào.
Như chúng ta đã thấy, bước đầu tiên là chia văn bản thành các từ (hoặc các phần của từ,theo ký hiệu dấu câu, v.v.), thường được gọi là _token_. Có nhiều quy tắc có thể chi phối quá trình đó, đó là lý do tại sao chúng ta cần khởi tạo trình token bằng cách sử dụng tên của mô hình, để đảm bảo rằng chúng tôi sử dụng cùng các quy tắc đã được sử dụng khi mô hình được huấn luyện trước.
Bước thứ hai là chuyển đổi các token đó thành số để chúng ta có thể xây dựng một tensor từ chúng và đưa chúng vào mô hình. Để làm điều này, tokenizer có _từ vựng_, là phần chúng ta tải xuống khi khởi tạo nó bằng phương thức `from_pretrained()`. Một lần nữa, chúng ta cần sử dụng cùng một bộ từ vựng được sử dụng khi mô hình được huấn luyện trước.
Để hiểu rõ hơn về hai bước, chúng ta sẽ khám phá chúng một cách riêng biệt. Lưu ý rằng chúng tôi sẽ sử dụng một số phương pháp thực hiện các phần của pipeline tokenize riêng biệt để hiển thị cho bạn kết quả trung gian của các bước đó, nhưng trên thực tế, bạn nên gọi tokenize trực tiếp trên đầu vào của mình (như được hiển thị trong phần 2).
### Tokenize
Quá trình tokenize được thực hiện bởi phương thức `tokenize()` của tokenizer:
```py
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
sequence = "Using a Transformer network is simple"
tokens = tokenizer.tokenize(sequence)
print(tokens)
```
Kết quả của phương thức này là một danh sách các chuỗi văn bản hoặc tokens:
```python out
['Using', 'a', 'transform', '##er', 'network', 'is', 'simple']
```
Tokenizer này là một tokenizer dựa trên từ phụ: nó chia các từ cho đến khi lấy được các token có thể được biểu diễn bởi bộ từ vựng của nó. Ví dụ, `transformer` sẽ được chia thành hai token: `transform` và `##er`.
### Từ token tới ID đầu vào
Quá trình chuyển đổi sang ID đầu vào được thực hiện bởi phương thức `convert_tokens_to_ids()` của tokenizer:
```py
ids = tokenizer.convert_tokens_to_ids(tokens)
print(ids)
```
```python out
[7993, 170, 11303, 1200, 2443, 1110, 3014]
```
Các đầu ra này, sau khi được chuyển đổi sang khung tensor thích hợp, có thể được sử dụng làm đầu vào cho một mô hình như đã thấy ở phần trước trong chương này.
<Tip>
✏️ **Thử nghiệm thôi!** Sao chép hai bước cuối cùng (tokenize và chuyển đổi sang ID đầu vào) trên các câu đầu vào mà chúng ta đã sử dụng trong phần 2 ("I've been waiting for a HuggingFace course my whole life." và "I hate this so much!"). Kiểm tra xem bạn có nhận được các ID đầu vào giống như chúng tôi đã nhận trước đó không!
</Tip>
## Giải mã
_Decoding_ hay _giải mã_ thì ngược lại: từ các chỉ số từ vựng, ta muốn trả về một chuỗi văn bản. Điều này có thể được thực hiện với phương thức `decode()` như sau:
```py
decoded_string = tokenizer.decode([7993, 170, 11303, 1200, 2443, 1110, 3014])
print(decoded_string)
```
```python out
'Using a Transformer network is simple'
```
Lưu ý rằng phương pháp `giải mã` không chỉ chuyển đổi các chỉ số trở lại thành token, mà còn nhóm các token là một phần của cùng một từ lại với nhau để tạo ra một câu có thể đọc được. Hành vi này sẽ cực kỳ hữu ích khi chúng ta sử dụng các mô hình dự đoán văn bản mới (văn bản được tạo từ lời nhắc hoặc đối với các bài toán chuỗi-sang-chuỗi như dịch hoặc tóm tắt văn bản).
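Chẳng hạn (ví dụ minh hoạ thêm), nếu ta giải mã toàn bộ chuỗi `input_ids` thu được khi gọi trực tiếp tokenizer ở phần đầu, ta sẽ thấy cả các token đặc biệt `[CLS]` và `[SEP]` mà tokenizer đã tự động thêm vào:
```py
print(tokenizer.decode([101, 7993, 170, 11303, 1200, 2443, 1110, 3014, 102]))
```
```python out
'[CLS] Using a Transformer network is simple [SEP]'
```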
Bây giờ bạn đã hiểu các hoạt động nguyên tử mà một tokenizer có thể xử lý: tokenize, chuyển đổi sang ID và chuyển đổi ID trở lại một chuỗi. Tuy nhiên, tất cả chỉ mới là sự bắt đầu. Trong phần sau, chúng ta sẽ tiếp cận các giới hạn của nó và xem cách vượt qua chúng.
| course/chapters/vi/chapter2/4.mdx/0 | {
"file_path": "course/chapters/vi/chapter2/4.mdx",
"repo_id": "course",
"token_count": 9923
} | 148 |
# Hoàn thành phần 1!
<CourseFloatingBanner
chapter={4}
classNames="absolute z-10 right-0 top-0"
/>
Đây là mục cuối của phần đầu tiên trong khóa học! Phần 2 sẽ ra mắt vào ngày 15/11 tới đây với một sự kiện cộng đồng lớn, xem thêm thông tin [tại đây](https://huggingface.co/blog/course-launch-event).
Giờ đây, bạn có thể tinh chỉnh mô hình được huấn luyện trước về vấn đề phân loại văn bản (đơn hoặc cặp câu) và tải kết quả lên Model Hub. Để đảm bảo bạn thành thạo phần đầu tiên này, bạn nên làm chính xác phần đó đối với một vấn đề mà bạn quan tâm (và không nhất thiết phải bằng tiếng Anh nếu bạn nói một ngôn ngữ khác)! Bạn có thể tìm trợ giúp trong [diễn đàn Hugging Face](https://discuss.huggingface.co/) và chia sẻ dự án của mình trong [chủ đề này](https://discuss.huggingface.co/t/share-your-projects/6803) sau khi bạn hoàn thành.
Chúng tôi háo hức chờ đợi để xem bạn sẽ xây dựng những gì với điều này!
| course/chapters/vi/chapter4/5.mdx/0 | {
"file_path": "course/chapters/vi/chapter4/5.mdx",
"repo_id": "course",
"token_count": 726
} | 149 |
# Byte-Pair Encoding tokenization
<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter6/section5.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter6/section5.ipynb"},
]} />
Mã hóa theo cặp byte (Byte-Pair Encoding, hay BPE) ban đầu được phát triển như một thuật toán để nén văn bản, sau đó được OpenAI sử dụng để tokenize khi huấn luyện trước mô hình GPT. Nó được sử dụng bởi rất nhiều mô hình Transformer, bao gồm GPT, GPT-2, RoBERTa, BART và DeBERTa.
<Youtube id="HEikzVL-lZU"/>
<Tip>
💡 Phần này trình bày sâu hơn về BPE, đi xa hơn nữa là trình bày cách triển khai đầy đủ. Bạn có thể bỏ qua phần cuối nếu bạn chỉ muốn có một cái nhìn tổng quan chung về thuật toán tokenize.
</Tip>
## Thuật toán huấn luyện
Huấn luyện BPE bắt đầu bằng cách tính toán tập hợp các từ duy nhất được sử dụng trong kho ngữ liệu (sau khi hoàn thành các bước chuẩn hóa và pre-tokenization), sau đó xây dựng từ vựng bằng cách lấy tất cả các ký hiệu được sử dụng để viết những từ đó. Ví dụ rất đơn giản, giả sử kho dữ liệu của chúng ta sử dụng năm từ sau:
```
"hug", "pug", "pun", "bun", "hugs"
```
Từ vựng cơ sở khi đó sẽ là `["b", "g", "h", "n", "p", "s", "u"]`. Đối với các trường hợp trong thực tế, từ vựng cơ sở đó sẽ chứa ít nhất là tất cả các ký tự ASCII, và có thể cả một số ký tự Unicode. Nếu một mẫu bạn đang tokenize sử dụng một ký tự không có trong kho dữ liệu huấn luyện, thì ký tự đó sẽ được chuyển đổi thành token không xác định. Đó là một lý do tại sao nhiều mô hình NLP rất kém trong việc phân tích nội dung có biểu tượng cảm xúc.
<Tip>
GPT-2 và RoBERTa tokenizer (khá giống nhau) có một cách thông minh để giải quyết vấn đề này: chúng không xem các từ được viết bằng các ký tự Unicode mà là các byte. Bằng cách này, từ vựng cơ sở có kích thước nhỏ (256), nhưng mọi ký tự bạn có thể nghĩ đến sẽ vẫn được bao gồm và không bị chuyển đổi thành token không xác định. Thủ thuật này được gọi là *BPE cấp byte*.
</Tip>
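Để hình dung ý tưởng này, ta có thể xem một chuỗi bất kỳ dưới dạng các byte của nó (ví dụ minh hoạ nhỏ, không phải mã của 🤗 Tokenizers):
```python
# Mọi ký tự, kể cả ký tự có dấu và emoji, đều được biểu diễn bằng các byte trong khoảng 0-255
text = "café 🤗"
print(list(text.encode("utf-8")))
```
```python out
[99, 97, 102, 195, 169, 32, 240, 159, 164, 151]
```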
Sau khi có được bộ từ vựng cơ bản này, chúng ta thêm các token mới cho đến khi đạt được kích thước từ vựng mong muốn bằng cách học *hợp nhất*, đây là các quy tắc để hợp nhất hai yếu tố của từ vựng hiện có với nhau thành một token mới. Vì vậy, lúc đầu sự hợp nhất này sẽ tạo ra các token có hai ký tự và sau đó, khi quá trình huấn luyện tiến triển, các từ phụ sẽ dài hơn.
Tại bất kỳ bước nào trong quá trình huấn luyện token, thuật toán BPE sẽ tìm kiếm cặp token hiện có thường xuyên nhất (theo "cặp", ở đây có nghĩa là hai token liên tiếp trong một từ). Cặp thường xuyên nhất đó là cặp sẽ được hợp nhất, và chúng ta xả và lặp lại cho bước tiếp theo.
Quay trở lại ví dụ trước, giả sử các từ có tần số như sau:
```
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
```
nghĩa là `"hug"` có mặt 10 lần trong kho ngữ liệu, `"pug"` 5 lần, `"pun"` 12 lần, `"bun"` 4 lần và `"hugs"` 5 lần. Chúng ta bắt đầu huấn luyện bằng cách tách từng từ thành các ký tự (những ký tự hình thành từ vựng ban đầu của chúng ta) để có thể xem mỗi từ như một danh sách các token:
```
("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5)
```
Sau đó, chúng ta xem xét các cặp. Cặp `("h", "u")` có trong các từ `"hug"` và `"hugs"`, vì vậy tổng cộng là 15 lần trong ngữ liệu. Tuy nhiên, đây không phải là cặp thường xuyên nhất: vinh dự đó thuộc về `("u", "g")`, có trong `"hug"`, `"pug"`, và `"hugs"`, với tổng cộng 20 lần xuất hiện trong kho ngữ liệu.
Do đó, quy tắc hợp nhất đầu tiên được học bởi tokenizer là `("u", "g") -> "ug"`, có nghĩa là `"ug"` sẽ được thêm vào từ vựng và cặp này sẽ được hợp nhất trong tất cả các từ của ngữ liệu. Vào cuối giai đoạn này, từ vựng và ngữ liệu sẽ giống như sau:
```
Vocabulary: ["b", "g", "h", "n", "p", "s", "u", "ug"]
Corpus: ("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5)
```
Bây giờ chúng ta có một số cặp dẫn đến một token dài hơn hai ký tự: ví dụ: cặp `("h", "ug")`, (hiện diện 15 lần trong kho ngữ liệu). Cặp thường gặp nhất ở giai đoạn này là `("u", "n")`, xuất hiện 16 lần trong kho ngữ liệu, vì vậy quy tắc hợp nhất thứ hai đã học là `("u", "n") -> "un"`. Thêm nó vào bộ từ vựng và hợp nhất tất cả các lần xuất hiện hiện có sẽ dẫn chúng ta đến:
```
Vocabulary: ["b", "g", "h", "n", "p", "s", "u", "ug", "un"]
Corpus: ("h" "ug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("h" "ug" "s", 5)
```
Giờ thì cặp xuất hiện nhiều nhất là `("h", "ug")`, nên chúng ta hợp nhất `("h", "ug") -> "hug"`, trả về cho chúng ta token gồm ba kí tự đầu tiên. Sau sự hợp nhất này, kho ngữ liệu sẽ như sau:
```
Vocabulary: ["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]
Corpus: ("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5)
```
Và chúng ta tiếp tục làm như vậy cho đến khi đạt được kích thước bộ từ vựng mong muốn.
<Tip>
✏️ **Giờ thì đến lượt bạn!** Bạn nghĩ bước hợp nhất tiếp theo sẽ là gì?
</Tip>
## Thuật toán tokenize
Tokenize tuân thủ chặt chẽ quá trình huấn luyện, theo nghĩa là các đầu vào mới được tokenize bằng cách áp dụng các bước sau:
1. Chuẩn hoá
2. Pre-tokenization
3. Tách các từ thành các ký tự riêng lẻ
4. Áp dụng các quy tắc hợp nhất đã học theo thứ tự trên các phần tách đó
Lấy ví dụ mà ta đã sử dụng trong quá trình huấn luyện, với ba quy tắc hợp nhất đã học:
```
("u", "g") -> "ug"
("u", "n") -> "un"
("h", "ug") -> "hug"
```
Từ `"bug"` sẽ được tokenize thành `["b", "ug"]`. `"mug"`, tuy nhiên, sẽ tokenize thành `["[UNK]", "ug"]` vì kí tự `"m"` không có trong bộ tự vựng gốc. Tương tự, từ `"thug"` sẽ được tokenize thành `["[UNK]", "hug"]`: kí tự `"t"` không có trong bộ tự vựng gốc, và áp dụng quy tắc hợp nhất ở `"u"` và `"g"` và sau đó `"hu"` và `"g"`.
<Tip>
✏️ **Giờ tới lượt bạn!** Bạn nghĩ rằng `"unhug"` sẽ được tokenize như thế nào?
</Tip>
## Triển khai BPE
Hãy cùng xem cách thuật toán BPE được triển khai. Đây không phải là phiên bản tối ưu mà bạn có thể thực sự sử dụng cho một kho ngữ liệu lớn; chúng tôi chỉ muốn cho bạn xem đoạn mã để bạn có thể hiểu thuật toán này tốt hơn.
Đầu tiên chúng ta cần một kho ngữ liệu, vậy nên hãy tạo ra một bản đơn giản với một vài câu:
```python
corpus = [
"This is the Hugging Face Course.",
"This chapter is about tokenization.",
"This section shows several tokenizer algorithms.",
"Hopefully, you will be able to understand how they are trained and generate tokens.",
]
```
Tiếp theo, ta cần tiền tokenize kho ngữ liệu này thành các từ. Vì ta đang sao chép một BPE tokenizer (như GPT-2), ta sẽ sử dụng tokenizer `gpt2` cho bước pre-tokenization:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("gpt2")
```
Sau đó ta tính tần suất của từng từ trong kho ngữ liệu như khi làm với pre-tokenization:
```python
from collections import defaultdict
word_freqs = defaultdict(int)
for text in corpus:
words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text)
new_words = [word for word, offset in words_with_offsets]
for word in new_words:
word_freqs[word] += 1
print(word_freqs)
```
```python out
defaultdict(int, {'This': 3, 'Ġis': 2, 'Ġthe': 1, 'ĠHugging': 1, 'ĠFace': 1, 'ĠCourse': 1, '.': 4, 'Ġchapter': 1,
'Ġabout': 1, 'Ġtokenization': 1, 'Ġsection': 1, 'Ġshows': 1, 'Ġseveral': 1, 'Ġtokenizer': 1, 'Ġalgorithms': 1,
'Hopefully': 1, ',': 1, 'Ġyou': 1, 'Ġwill': 1, 'Ġbe': 1, 'Ġable': 1, 'Ġto': 1, 'Ġunderstand': 1, 'Ġhow': 1,
'Ġthey': 1, 'Ġare': 1, 'Ġtrained': 1, 'Ġand': 1, 'Ġgenerate': 1, 'Ġtokens': 1})
```
Tiếp theo chúng ta sẽ tính bộ từ vựng cơ sở từ các kí tự sử dụng trong kho ngữ liệu:
```python
alphabet = []
for word in word_freqs.keys():
for letter in word:
if letter not in alphabet:
alphabet.append(letter)
alphabet.sort()
print(alphabet)
```
```python out
[ ',', '.', 'C', 'F', 'H', 'T', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's',
't', 'u', 'v', 'w', 'y', 'z', 'Ġ']
```
Ta cũng thêm các token đặc biệt của mô hình vào đầu bộ từ vựng đó. Trong trường hợp của GPT-2, token đặc biệt duy nhất là `"<|endoftext|>"`:
```python
vocab = ["<|endoftext|>"] + alphabet.copy()
```
Giờ ta cần chia mỗi từ thành các ký tự riêng lẻ để có thể bắt đầu huấn luyện:
```python
splits = {word: [c for c in word] for word in word_freqs.keys()}
```
Giờ ta đã sẵn sàng để huấn luyện, hãy cùng viết một hàm tính tần suất mỗi cặp. Ta sẽ cần sử dụng nó ở bước huấn luyện:
```python
def compute_pair_freqs(splits):
pair_freqs = defaultdict(int)
for word, freq in word_freqs.items():
split = splits[word]
if len(split) == 1:
continue
for i in range(len(split) - 1):
pair = (split[i], split[i + 1])
pair_freqs[pair] += freq
return pair_freqs
```
Hãy nhìn vào một phần của từ điển này sau những lần tách ban đầu:
```python
pair_freqs = compute_pair_freqs(splits)
for i, key in enumerate(pair_freqs.keys()):
print(f"{key}: {pair_freqs[key]}")
if i >= 5:
break
```
```python out
('T', 'h'): 3
('h', 'i'): 3
('i', 's'): 5
('Ġ', 'i'): 2
('Ġ', 't'): 7
('t', 'h'): 3
```
Giờ thì, tìm xem cặp xuất hiện nhiều nhất bằng một vòng lặp nhanh:
```python
best_pair = ""
max_freq = None
for pair, freq in pair_freqs.items():
if max_freq is None or max_freq < freq:
best_pair = pair
max_freq = freq
print(best_pair, max_freq)
```
```python out
('Ġ', 't') 7
```
Vậy phép hợp nhất đầu tiên là `('Ġ', 't') -> 'Ġt'`, và ta thêm `'Ġt'` vào bộ từ vựng:
```python
merges = {("Ġ", "t"): "Ġt"}
vocab.append("Ġt")
```
Để tiếp tục, ta cần áp dụng sự hợp nhất ở từ điển `splits`. Hãy cùng viết một hàm khác cho nó:
```python
def merge_pair(a, b, splits):
for word in word_freqs:
split = splits[word]
if len(split) == 1:
continue
i = 0
while i < len(split) - 1:
if split[i] == a and split[i + 1] == b:
split = split[:i] + [a + b] + split[i + 2 :]
else:
i += 1
splits[word] = split
return splits
```
Giờ ta có thể nhìn xem kết quả của lần hợp nhất đầu tiên:
```py
splits = merge_pair("Ġ", "t", splits)
print(splits["Ġtrained"])
```
```python out
['Ġt', 'r', 'a', 'i', 'n', 'e', 'd']
```
Giờ thì ta có tất cả những gì mình cần để lặp cho đến khi học được tất cả các quy tắc hợp nhất mà ta muốn. Hãy cùng nhắm tới bộ từ vựng có kích cỡ là 50:
```python
vocab_size = 50
while len(vocab) < vocab_size:
pair_freqs = compute_pair_freqs(splits)
best_pair = ""
max_freq = None
for pair, freq in pair_freqs.items():
if max_freq is None or max_freq < freq:
best_pair = pair
max_freq = freq
splits = merge_pair(*best_pair, splits)
merges[best_pair] = best_pair[0] + best_pair[1]
vocab.append(best_pair[0] + best_pair[1])
```
Kết quả là, chúng ta đã học được 19 quy tắc hợp nhất (bộ từ vựng gốc có kích cỡ là 31, tương ứng 30 ký tự trong bảng chữ cái cùng một token đặc biệt):
```py
print(merges)
```
```python out
{('Ġ', 't'): 'Ġt', ('i', 's'): 'is', ('e', 'r'): 'er', ('Ġ', 'a'): 'Ġa', ('Ġt', 'o'): 'Ġto', ('e', 'n'): 'en',
('T', 'h'): 'Th', ('Th', 'is'): 'This', ('o', 'u'): 'ou', ('s', 'e'): 'se', ('Ġto', 'k'): 'Ġtok',
('Ġtok', 'en'): 'Ġtoken', ('n', 'd'): 'nd', ('Ġ', 'is'): 'Ġis', ('Ġt', 'h'): 'Ġth', ('Ġth', 'e'): 'Ġthe',
('i', 'n'): 'in', ('Ġa', 'b'): 'Ġab', ('Ġtoken', 'i'): 'Ġtokeni'}
```
Và bộ từ vựng được cấu thành bởi token đặc biệt, các ký tự trong bảng chữ cái, và tất cả kết quả từ các quy tắc hợp nhất:
```py
print(vocab)
```
```python out
['<|endoftext|>', ',', '.', 'C', 'F', 'H', 'T', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o',
'p', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z', 'Ġ', 'Ġt', 'is', 'er', 'Ġa', 'Ġto', 'en', 'Th', 'This', 'ou', 'se',
'Ġtok', 'Ġtoken', 'nd', 'Ġis', 'Ġth', 'Ġthe', 'in', 'Ġab', 'Ġtokeni']
```
<Tip>
💡 Sử dụng `train_new_from_iterator()` trên cùng kho ngữ liệu sẽ không mang về kết quả kho ngữ liệu y hệt. Đó là bởi khi có sự lựa chọn về cặp có tần suất cao nhất, ta đã chọn cái đầu tiên xuất hiện, trong khi thư viện 🤗 Tokenizers chọn cái đầu tiên dựa trên ID bên trong của nó.
</Tip>
Để tokenize văn bản mới, chúng ta tiền tokenize nó, tách ra, rồi áp dụng quy tắc hợp nhất được học:
```python
def tokenize(text):
pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text)
pre_tokenized_text = [word for word, offset in pre_tokenize_result]
splits = [[l for l in word] for word in pre_tokenized_text]
for pair, merge in merges.items():
for idx, split in enumerate(splits):
i = 0
while i < len(split) - 1:
if split[i] == pair[0] and split[i + 1] == pair[1]:
split = split[:i] + [merge] + split[i + 2 :]
else:
i += 1
splits[idx] = split
return sum(splits, [])
```
Ta có thể thử cách này với bất kỳ đoạn văn nào khác được tạo thành từ các ký tự trong bảng chữ cái:
```py
tokenize("This is not a token.")
```
```python out
['This', 'Ġis', 'Ġ', 'n', 'o', 't', 'Ġa', 'Ġtoken', '.']
```
<Tip warning={true}>
⚠️ Triển khai của chúng ta sẽ gặp lỗi nếu có những ký tự không xác định vì chúng ta đã không làm gì để xử lý chúng. GPT-2 không thực sự có token không xác định (không thể có ký tự không xác định khi sử dụng BPE cấp byte), nhưng điều đó có thể xảy ra ở đây vì ta không bao gồm tất cả các byte có thể có trong bộ từ vựng gốc. Khía cạnh này của BPE nằm ngoài phạm vi phần này, nên chúng tôi sẽ không đi sâu vào chi tiết.
</Tip>
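Nếu muốn, bạn có thể thêm một bước phòng vệ đơn giản vào hàm `tokenize()` ở trên: thay mọi ký tự không có trong bộ từ vựng bằng `"[UNK]"` trước khi áp dụng các quy tắc hợp nhất. Đây chỉ là một phác thảo minh hoạ, không phải cách GPT-2 thực sự xử lý vấn đề này:

```python
def tokenize_with_unk(text):
    pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text)
    pre_tokenized_text = [word for word, offset in pre_tokenize_result]
    # Thay ký tự không có trong bộ từ vựng bằng "[UNK]" trước khi hợp nhất
    splits = [[l if l in vocab else "[UNK]" for l in word] for word in pre_tokenized_text]
    for pair, merge in merges.items():
        for idx, split in enumerate(splits):
            i = 0
            while i < len(split) - 1:
                if split[i] == pair[0] and split[i + 1] == pair[1]:
                    split = split[:i] + [merge] + split[i + 2 :]
                else:
                    i += 1
            splits[idx] = split
    return sum(splits, [])
```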
Đó là những gì ta cần biết về thuật toán BPE! Tiếp theo, chúng ta sẽ cùng tìm hiểu về WordPiece.
| course/chapters/vi/chapter6/5.mdx/0 | {
"file_path": "course/chapters/vi/chapter6/5.mdx",
"repo_id": "course",
"token_count": 9713
} | 150 |
# Yêu cầu trợ giúp trên diễn đàn
<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter8/section3.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter8/section3.ipynb"},
]} />
<Youtube id="S2EEG3JIt2A"/>
[Diễn đàn Hugging Face](https://discuss.huggingface.co) là nơi tuyệt vời để nhận được sự giúp đỡ từ các nhóm nguồn mở và cộng đồng Hugging Face. Trang chủ luôn trông như sau:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/forums.png" alt="The Hugging Face forums." width="100%"/>
</div>
Ở bên tay trái, bạn có thể thấy tất cả các danh mục mà các chủ đề khác nhau được nhóm lại, trong khi bên tay phải hiển thị các chủ đề gần đây nhất. Chủ đề là một bài đăng có chứa tiêu đề, danh mục và mô tả; nó khá giống với định dạng vấn đề GitHub mà chúng ta đã thấy khi tạo tập dữ liệu của riêng mình trong [Chương 5](/course/chapter5). Như tên cho thấy, danh mục [Beginners](https://discuss.huggingface.co/c/beginners/5) chủ yếu dành cho những người mới bắt đầu với hệ sinh thái và thư viện Hugging Face. Mọi câu hỏi trên bất kỳ thư viện nào đều được hoan nghênh ở đó, có thể là để gỡ lỗi một số mã hoặc để yêu cầu trợ giúp về cách thực hiện điều gì đó. (Điều đó nói rằng, nếu câu hỏi của bạn liên quan đến một thư viện cụ thể, bạn có thể nên chuyển đến danh mục thư viện tương ứng trên diễn đàn.)
Tương tự, danh mục [Intermediate](https://discuss.huggingface.co/c/intermediate/6)và [Research](https://discuss.huggingface.co/c/research/7) dành cho các câu hỏi nâng cao hơn , ví dụ về thư viện hoặc một số nghiên cứu NLP mới thú vị mà bạn muốn thảo luận.
Và đương nhiên, chúng ta cũng nên đề cập đến danh mục [Course](https://discuss.huggingface.co/c/course/20), nơi bạn có thể đặt bất kỳ câu hỏi nào liên quan đến khóa học Hugging Face!
Khi bạn đã chọn một danh mục, bạn sẽ sẵn sàng viết chủ đề đầu tiên của mình. Bạn có thể tìm thấy một số [hướng dẫn](https://discuss.huggingface.co/t/how-to-request-support/3128) trong diễn đàn về cách thực hiện việc này và trong phần này chúng ta sẽ xem xét một số tính năng tạo nên một chủ đề hay.
## Viết một bài đăng tốt trên diễn đàn
Như một ví dụ, giả sử rằng chúng ta đang cố gắng tạo các biểu diễn từ các bài viết trên Wikipedia để tạo một công cụ tìm kiếm tùy chỉnh. Như thường lệ, chúng ta tải tokenizer và mô hình như sau:
```python
from transformers import AutoTokenizer, AutoModel
model_checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = AutoModel.from_pretrained(model_checkpoint)
```
Giờ giả sử ta đang cố nhúng toàn bộ phần này của [Wikipedia](https://en.wikipedia.org/wiki/Transformers) lên Transformers:
```python
text = """
Generation One is a retroactive term for the Transformers characters that
appeared between 1984 and 1993. The Transformers began with the 1980s Japanese
toy lines Micro Change and Diaclone. They presented robots able to transform
into everyday vehicles, electronic items or weapons. Hasbro bought the Micro
Change and Diaclone toys, and partnered with Takara. Marvel Comics was hired by
Hasbro to create the backstory; editor-in-chief Jim Shooter wrote an overall
story, and gave the task of creating the characthers to writer Dennis O'Neil.
Unhappy with O'Neil's work (although O'Neil created the name "Optimus Prime"),
Shooter chose Bob Budiansky to create the characters.
The Transformers mecha were largely designed by Shōji Kawamori, the creator of
the Japanese mecha anime franchise Macross (which was adapted into the Robotech
franchise in North America). Kawamori came up with the idea of transforming
mechs while working on the Diaclone and Macross franchises in the early 1980s
(such as the VF-1 Valkyrie in Macross and Robotech), with his Diaclone mechs
later providing the basis for Transformers.
The primary concept of Generation One is that the heroic Optimus Prime, the
villainous Megatron, and their finest soldiers crash land on pre-historic Earth
in the Ark and the Nemesis before awakening in 1985, Cybertron hurtling through
the Neutral zone as an effect of the war. The Marvel comic was originally part
of the main Marvel Universe, with appearances from Spider-Man and Nick Fury,
plus some cameos, as well as a visit to the Savage Land.
The Transformers TV series began around the same time. Produced by Sunbow
Productions and Marvel Productions, later Hasbro Productions, from the start it
contradicted Budiansky's backstories. The TV series shows the Autobots looking
for new energy sources, and crash landing as the Decepticons attack. Marvel
interpreted the Autobots as destroying a rogue asteroid approaching Cybertron.
Shockwave is loyal to Megatron in the TV series, keeping Cybertron in a
stalemate during his absence, but in the comic book he attempts to take command
of the Decepticons. The TV series would also differ wildly from the origins
Budiansky had created for the Dinobots, the Decepticon turned Autobot Jetfire
(known as Skyfire on TV), the Constructicons (who combine to form
Devastator),[19][20] and Omega Supreme. The Marvel comic establishes early on
that Prime wields the Creation Matrix, which gives life to machines. In the
second season, the two-part episode The Key to Vector Sigma introduced the
ancient Vector Sigma computer, which served the same original purpose as the
Creation Matrix (giving life to Transformers), and its guardian Alpha Trion.
"""
inputs = tokenizer(text, return_tensors="pt")
logits = model(**inputs).logits
```
```python output
IndexError: index out of range in self
```
Rất tiếc, chúng ta đã gặp sự cố - và thông báo lỗi khó hiểu hơn nhiều so với những thông báo chúng ta thấy trong [phần 2](/course/chapter8/section2)! Chúng ta không thể hiểu nổi toàn bộ truy vết, vì vậy chúng ta quyết định chuyển sang diễn đàn Hugging Face để được trợ giúp. Làm thế nào chúng ta có thể tạo ra chủ đề?
Để bắt đầu, chúng ta cần nhấp vào nút "New Topic" hay "Chủ đề mới" ở góc trên bên phải (lưu ý rằng để tạo chủ đề, chúng ta cần đăng nhập):
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/forums-new-topic.png" alt="Creating a new forum topic." width="100%"/>
</div>
Thao tác này sẽ hiển thị một giao diện viết, nơi chúng ta có thể nhập tiêu đề của chủ đề của mình, chọn một danh mục và soạn thảo nội dung:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/forum-topic01.png" alt="The interface for creating a forum topic." width="100%"/>
</div>
Vì lỗi dường như chỉ xảy ra với 🤗 Transformers, nên chúng ta sẽ chọn lỗi này cho danh mục. Nỗ lực đầu tiên của chúng ta trong việc giải thích vấn đề có thể trông giống như sau:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/forum-topic02.png" alt="Drafting the content for a new forum topic." width="100%"/>
</div>
Mặc dù chủ đề này chứa thông báo lỗi mà chúng tôi cần trợ giúp, nhưng có một số vấn đề với cách viết:
1. Tiêu đề không mang tính mô tả cao, vì vậy bất kỳ ai duyệt diễn đàn sẽ không thể biết chủ đề là gì nếu không đọc phần nội dung.
2. Phần thân không cung cấp đủ thông tin về _nơi_ bắt nguồn lỗi và _cách_ để tạo lại lỗi đó.
3. Chủ đề gắn thẻ trực tiếp một vài người với giọng điệu hơi khắt khe.
Các chủ đề như thế này không có khả năng nhận được câu trả lời nhanh (nếu họ nhận được một câu trả lời nào đó), vì vậy hãy xem cách chúng ta có thể cải thiện nó. Chúng ta sẽ bắt đầu với vấn đề đầu tiên là chọn một tiêu đề hay.
### Chọn một tiêu đề mô tả
Nếu bạn đang cố gắng nhận trợ giúp về một lỗi trong mã của mình, một nguyên tắc chung là đưa đủ thông tin vào tiêu đề để người khác có thể nhanh chóng xác định xem họ có nghĩ rằng họ có thể trả lời câu hỏi của bạn hay không. Trong ví dụ đang chạy của mình, chúng ta biết tên của ngoại lệ đang được nêu ra và có một số gợi ý rằng nó được kích hoạt trong phần truyền thẳng của mô hình, nơi chúng tôi gọi là `model(**inputs)`. Để thông báo điều này, một tiêu đề có thể có là:
> Source of IndexError in the AutoModel forward pass?
hay
> Nguồn của IndexError trong thẻ chuyển tiếp AutoModel?
Tiêu đề này cho người đọc biết bạn nghĩ rằng lỗi đến từ _đâu_ và nếu họ đã gặp phải `IndexError` trước đó, thì rất có thể họ sẽ biết cách gỡ lỗi nó. Tất nhiên, tiêu đề có thể là bất kỳ thứ gì bạn muốn và các biến thể khác như:
> Why does my model produce an IndexError?
hay
> Tại sao mô hình của tôi tạo ra một IndexError?
cũng có thể ổn. Bây giờ chúng ta đã có một tiêu đề mô tả, hãy xem cách cải thiện nội dụng phần thân bài.
### Định dạng các đoạn mã của bạn
Đọc mã nguồn đã đủ khó trong IDE, nhưng còn khó hơn khi mã được sao chép và dán dưới dạng văn bản thuần túy! May mắn thay, các diễn đàn về Hugging Face hỗ trợ việc sử dụng Markdown, vì vậy bạn nên luôn đặt các khối mã của mình bằng ba dấu gạch ngược (```) để dễ đọc hơn. Hãy làm điều này để sửa chữa thông báo lỗi - và trong khi chúng ta xử lý nó, hãy làm cho phần nội dung lịch sự hơn một chút so với phiên bản gốc của mình:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/forum-topic03.png" alt="Our revised forum topic, with proper code formatting." width="100%"/>
</div>
Như bạn có thể thấy trong ảnh chụp màn hình, việc bao bọc các khối mã trong dấu gạch ngược sẽ chuyển văn bản thô thành mã được định dạng, hoàn chỉnh với kiểu màu! Cũng lưu ý rằng các dấu gạch ngược đơn lẻ có thể được sử dụng để định dạng các biến nội tuyến, giống như chúng tôi đã làm cho `distilbert-base-unsased`. Chủ đề này có vẻ tốt hơn nhiều và với một chút may mắn, chúng ta có thể tìm thấy ai đó trong cộng đồng có thể đoán được lỗi là gì. Tuy nhiên, thay vì dựa vào may mắn, chúng ta hãy làm cho cuộc sống dễ dàng hơn bằng cách đưa vào chi tiết đầy đủ các truy vết của nó!
### Bao gồm toàn bộ truy vết
Vì dòng cuối cùng của bản truy vết thường đủ để gỡ lỗi đoạn mã của riêng bạn, nên bạn có thể chỉ cung cấp dòng đó trong chủ đề của mình để "tiết kiệm dung lượng". Mặc dù có chủ ý tốt, điều này thực sự khiến người khác có thể _khó_ gỡ lỗi vấn đề _hơn_ vì thông tin cao hơn trong bản truy xuất có thể thực sự hữu ích. Vì vậy, một phương pháp hay là sao chép và dán _toàn bộ_ dấu vết, đồng thời đảm bảo rằng nó được định dạng độc đáo. Vì những lần truy xuất này có thể mất nhiều thời gian, một số người thích hiển thị chúng sau khi họ đã giải thích mã nguồn. Làm thôi nào. Bây giờ, chủ đề diễn đàn của chúng ta trông giống như sau:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/forum-topic04.png" alt="Our example forum topic, with the complete traceback." width="100%"/>
</div>
Điều này có nhiều thông tin hơn và một người đọc cẩn thận có thể chỉ ra rằng vấn đề dường như là do chuyển một đầu vào dài vì dòng này trong bản truy xuất:
> Token indices sequence length is longer than the specified maximum sequence length for this model (583 > 512).
Tuy nhiên, chúng ta có thể khiến mọi thứ trở nên dễ dàng hơn với họ bằng cách cung cấp mã thực đã gây ra lỗi. Hãy làm điều đó ngay bây giờ.
### Cung cấp một ví dụ có thể tái tạo
Nếu bạn đã từng cố gắng gỡ lỗi đoạn mã của người khác, trước tiên có thể bạn đã cố gắng tạo lại sự cố mà họ đã báo cáo để bạn có thể bắt đầu làm việc theo cách của mình thông qua truy xuất để xác định lỗi. Nó không khác gì khi nói đến (hoặc cung cấp) hỗ trợ trên các diễn đàn, vì vậy sẽ thực sự hữu ích nếu bạn có thể cung cấp một ví dụ nhỏ mô tả lại lỗi. Một nửa thời gian, chỉ cần đi qua bài tập này sẽ giúp bạn nhận ra điều gì đang xảy ra. Trong mọi trường hợp, phần còn thiếu trong ví dụ của chúng ta là hiển thị _các đầu vào_ mà ta đã cung cấp cho mô hình. Làm điều đó cho chúng ta một cái gì đó giống như ví dụ đã hoàn thành sau:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/forum-topic05.png" alt="The final version of our forum topic." width="100%"/>
</div>
Chủ đề này hiện chứa khá nhiều thông tin và nó được viết theo cách có nhiều khả năng thu hút sự chú ý của cộng đồng và nhận được câu trả lời hữu ích. Với những hướng dẫn cơ bản này, giờ đây bạn có thể tạo các chủ đề tuyệt vời để tìm câu trả lời cho các câu hỏi về 🤗 Transformers của mình!
| course/chapters/vi/chapter8/3.mdx/0 | {
"file_path": "course/chapters/vi/chapter8/3.mdx",
"repo_id": "course",
"token_count": 8054
} | 151 |
- title: 0. 安装
sections:
- local: chapter0/1
title: 课程简介
- title: 1. Transformer 模型
sections:
- local: chapter1/1
title: 本章简介
- local: chapter1/2
title: 自然语言处理
- local: chapter1/3
title: Transformers能做什么?
- local: chapter1/4
title: Transformers 是如何工作的?
- local: chapter1/5
title: 编码器模型
- local: chapter1/6
title: 解码器模型
- local: chapter1/7
title: 序列到序列模型
- local: chapter1/8
title: 偏见和局限性
- local: chapter1/9
title: 总结
- local: chapter1/10
title: 章末小测验
quiz: 1
- title: 2. 使用 🤗 Transformers
sections:
- local: chapter2/1
title: 本章简介
- local: chapter2/2
title: 管道的内部
- local: chapter2/3
title: 模型
- local: chapter2/4
title: 标记器(Tokenizer)
- local: chapter2/5
title: 处理多个序列
- local: chapter2/6
title: 把它们放在一起
- local: chapter2/7
title: 基本用法完成!
- local: chapter2/8
title: 章末小测验
quiz: 2
- title: 3. 微调一个预训练模型
sections:
- local: chapter3/1
title: 本章简介
- local: chapter3/2
title: 预处理数据
- local: chapter3/3
title: 使用 Trainer API 或者 Keras 微调一个模型
local_fw: { pt: chapter3/3, tf: chapter3/3_tf }
- local: chapter3/4
title: 一个完成的训练过程
- local: chapter3/5
title: 微调,章节回顾!
- local: chapter3/6
title: 章末小测验
quiz: 3
- title: 4. 分享你的模型和标记器
sections:
- local: chapter4/1
title: The Hugging Face Hub
- local: chapter4/2
title: 使用预训练模型
- local: chapter4/3
title: 分享预训练的模型
- local: chapter4/4
title: 构建模型卡片
- local: chapter4/5
title: Part 1 完结!
- local: chapter4/6
title: 章末小测验
quiz: 4
- title: 5. 🤗 Datasets库
sections:
- local: chapter5/1
title: 本章简介
- local: chapter5/2
title: 如果我的数据集不在 Hub 上怎么办?
- local: chapter5/3
title: 是时候来学一下切片了
- local: chapter5/4
title: 大数据? 🤗 Datasets 来救援!
- local: chapter5/5
title: 创建自己的数据集
- local: chapter5/6
title: 使用 FAISS 进行语义搜索
- local: chapter5/7
title: 🤗 Datasets,回顾!
- local: chapter5/8
title: 章末小测验
quiz: 5
- title: 6. 🤗 Tokenizers库
sections:
- local: chapter6/1
title: 本章简介
- local: chapter6/2
title: 根据已有的tokenizer训练新的tokenizer
- local: chapter6/3
title: 快速标记器的特殊能力
- local: chapter6/3b
title: QA 管道中的快速标记器
- local: chapter6/4
title: 标准化和预标记化
- local: chapter6/5
title: 字节对编码标记化
- local: chapter6/6
title: WordPiece 标记化
- local: chapter6/7
title: Unigram标记化
- local: chapter6/8
title: 逐块地构建标记器
- local: chapter6/9
title: 标记器,回顾!
- local: chapter6/10
title: 章末小测验
quiz: 6
- title: 7. 主要的 NLP 任务
sections:
- local: chapter7/1
title: 章节简介
- local: chapter7/2
title: 标记(token)分类
- local: chapter7/3
title: 微调一个掩码(mask)语言模型
- local: chapter7/4
title: 翻译
- local: chapter7/5
title: 文本摘要
- local: chapter7/6
title: 从头开始训练因果语言模型
- local: chapter7/7
title: 问答系统
- local: chapter7/8
title: 掌握 NLP
- local: chapter7/9
title: 章节测验
quiz: 7
- title: 8. 如何寻求帮助
sections:
- local: chapter8/1
title: 章节简介
- local: chapter8/2
title: 出现错误时该怎么办
- local: chapter8/3
title: 在论坛上寻求帮助
- local: chapter8/4
title: 调试训练管道
local_fw: { pt: chapter8/4, tf: chapter8/4_tf }
- local: chapter8/5
title: 如何提出一个好的问题
- local: chapter8/6
title: Part 2 完结!
- local: chapter8/7
title: 章节测验
quiz: 8
- title: 9. 构建并分享你的模型
new: true
subtitle: 我训练了一个模型,但我该如何展示它呢?
sections:
- local: chapter9/1
title: Gradio 简介
- local: chapter9/2
title: 构建你的第一个演示
- local: chapter9/3
title: 了解接口类
- local: chapter9/4
title: 与他人分享演示
- local: chapter9/5
title: 与 Hugging Face Hub 整合
- local: chapter9/6
title: 高级界面功能
- local: chapter9/7
title: Gradio 块简介
- local: chapter9/8
title: Gradio, 回顾!
- local: chapter9/9
title: 章末测试
quiz: 9
- title: 课程活动
sections:
- local: events/2
title: Part 2 发布活动
| course/chapters/zh-CN/_toctree.yml/0 | {
"file_path": "course/chapters/zh-CN/_toctree.yml",
"repo_id": "course",
"token_count": 2720
} | 152 |
<FrameworkSwitchCourse {fw} />
# 处理多个序列 [[处理多个序列]]
{#if fw === 'pt'}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section5_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section5_pt.ipynb"},
]} />
{:else}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section5_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section5_tf.ipynb"},
]} />
{/if}
{#if fw === 'pt'}
<Youtube id="M6adb1j2jPI"/>
{:else}
<Youtube id="ROxrFOEbsQE"/>
{/if}
在上一节中,我们探讨了最简单的用例:对一个小长度的序列进行推理。然而,一些问题已经出现:
* 我们如何处理多个序列?
* 我们如何处理多个序列不同长度?
* 词汇索引是让模型正常工作的唯一输入吗?
* 是否存在序列太长的问题?
让我们看看这些问题会带来什么样的问题,以及如何使用🤗 Transformers API解决它们
## 模型需要一批输入 [[模型需要一批输入]]
在上一个练习中,您看到了序列如何转换为数字列表。让我们将此数字列表转换为张量,并将其发送到模型:
{#if fw === 'pt'}
```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.tensor(ids)
# This line will fail.
model(input_ids)
```
```python out
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
```
{:else}
```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = tf.constant(ids)
# This line will fail.
model(input_ids)
```
```py out
InvalidArgumentError: Input to reshape is a tensor with 14 values, but the requested shape has 196 [Op:Reshape]
```
{/if}
哦,不!为什么失败了?我们明明遵循了第 2 节中管道的步骤。
问题是我们向模型发送了一个单独的序列,而 🤗 Transformers 模型默认情况下需要多个句子。在这里,当我们把分词器应用于一个序列时,我们尝试在幕后重现分词器所做的一切,但如果仔细观察,您会发现分词器不仅将输入 ID 列表转换为张量,还在其上添加了一个维度:
{#if fw === 'pt'}
```py
tokenized_inputs = tokenizer(sequence, return_tensors="pt")
print(tokenized_inputs["input_ids"])
```
```python out
tensor([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172,
2607, 2026, 2878, 2166, 1012, 102]])
```
{:else}
```py
tokenized_inputs = tokenizer(sequence, return_tensors="tf")
print(tokenized_inputs["input_ids"])
```
```py out
tf.Tensor: shape=(1, 16), dtype=int32, numpy=
array([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662,
12172, 2607, 2026, 2878, 2166, 1012, 102]], dtype=int32)>
```
{/if}
让我们重试并添加一个新维度:
{#if fw === 'pt'}
```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = torch.tensor([ids])
print("Input IDs:", input_ids)
output = model(input_ids)
print("Logits:", output.logits)
```
{:else}
```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = tf.constant([ids])
print("Input IDs:", input_ids)
output = model(input_ids)
print("Logits:", output.logits)
```
{/if}
我们打印输入ID以及生成的logits-以下是输出:
{#if fw === 'pt'}
```python out
Input IDs: [[ 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012]]
Logits: [[-2.7276, 2.8789]]
```
{:else}
```py out
Input IDs: tf.Tensor(
[[ 1045 1005 2310 2042 3403 2005 1037 17662 12172 2607 2026 2878
2166 1012]], shape=(1, 14), dtype=int32)
Logits: tf.Tensor([[-2.7276208 2.8789377]], shape=(1, 2), dtype=float32)
```
{/if}
*Batching* 是一次通过模型发送多个句子的行为。如果你只有一句话,你可以用一个序列构建一个批次:
```
batched_ids = [ids, ids]
```
这是一批两个相同的序列!
<Tip>
✏️ **试试看!** 将此列表转换为张量并通过模型传递。检查您是否获得与之前相同的 logits(但这次有两份)!
</Tip>
批处理允许模型在输入多个句子时工作。使用多个序列就像使用单个序列构建批一样简单。不过,还有第二个问题。当你试图将两个(或更多)句子组合在一起时,它们的长度可能不同。如果您以前使用过张量,那么您知道它们必须是矩形,因此无法将输入ID列表直接转换为张量。为了解决这个问题,我们通常填充输入。
## 填充输入 [[填充输入]]
以下列表不能转换为张量:
```py no-format
batched_ids = [
[200, 200, 200],
[200, 200]
]
```
为了解决这个问题,我们将使用填充使张量具有矩形。Padding通过在值较少的句子中添加一个名为Padding token的特殊单词来确保我们所有的句子长度相同。例如,如果你有10个包含10个单词的句子和1个包含20个单词的句子,填充将确保所有句子都包含20个单词。在我们的示例中,生成的张量如下所示:
```py no-format
padding_id = 100
batched_ids = [
[200, 200, 200],
[200, 200, padding_id],
]
```
可以在tokenizer.pad_token_id中找到填充令牌ID. 让我们使用它,将我们的两句话分别发送到模型中,并分批发送到一起:
{#if fw === 'pt'}
```py no-format
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence1_ids = [[200, 200, 200]]
sequence2_ids = [[200, 200]]
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
print(model(torch.tensor(sequence1_ids)).logits)
print(model(torch.tensor(sequence2_ids)).logits)
print(model(torch.tensor(batched_ids)).logits)
```
```python out
tensor([[ 1.5694, -1.3895]], grad_fn=<AddmmBackward>)
tensor([[ 0.5803, -0.4125]], grad_fn=<AddmmBackward>)
tensor([[ 1.5694, -1.3895],
[ 1.3373, -1.2163]], grad_fn=<AddmmBackward>)
```
{:else}
```py no-format
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequence1_ids = [[200, 200, 200]]
sequence2_ids = [[200, 200]]
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
print(model(tf.constant(sequence1_ids)).logits)
print(model(tf.constant(sequence2_ids)).logits)
print(model(tf.constant(batched_ids)).logits)
```
```py out
tf.Tensor([[ 1.5693678 -1.3894581]], shape=(1, 2), dtype=float32)
tf.Tensor([[ 0.5803005 -0.41252428]], shape=(1, 2), dtype=float32)
tf.Tensor(
[[ 1.5693681 -1.3894582]
[ 1.3373486 -1.2163193]], shape=(2, 2), dtype=float32)
```
{/if}
我们批处理预测中的logits有点问题:第二行应该与第二句的logits相同,但我们得到了完全不同的值!
这是因为Transformer模型的关键特性是关注层,它将每个标记上下文化。这些将考虑填充标记,因为它们涉及序列中的所有标记。为了在通过模型传递不同长度的单个句子时,或者在传递一批应用了相同句子和填充的句子时获得相同的结果,我们需要告诉这些注意层忽略填充标记。这是通过使用 attention mask来实现的。
## 注意力掩码 [[注意力掩码]]
*Attention masks*是与输入ID张量形状完全相同的张量,用0和1填充:1s表示应注意相应的标记,0s表示不应注意相应的标记(即,模型的注意力层应忽略它们)。
让我们用attention mask完成上一个示例:
{#if fw === 'pt'}
```py no-format
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
attention_mask = [
[1, 1, 1],
[1, 1, 0],
]
outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask))
print(outputs.logits)
```
```python out
tensor([[ 1.5694, -1.3895],
[ 0.5803, -0.4125]], grad_fn=<AddmmBackward>)
```
{:else}
```py no-format
batched_ids = [
[200, 200, 200],
[200, 200, tokenizer.pad_token_id],
]
attention_mask = [
[1, 1, 1],
[1, 1, 0],
]
outputs = model(tf.constant(batched_ids), attention_mask=tf.constant(attention_mask))
print(outputs.logits)
```
```py out
tf.Tensor(
[[ 1.5693681 -1.3894582 ]
[ 0.5803021 -0.41252586]], shape=(2, 2), dtype=float32)
```
{/if}
现在我们得到了该批次中第二个句子的相同 logits。
请注意,第二个序列的最后一个值是一个填充ID,它在attention mask中是一个0值。
<Tip>
✏️ 试试看!在第 2 节中使用的两个句子上手动应用标记化(“我一生都在等待拥抱课程。”和“我非常讨厌这个!”)。将它们分别传递给模型,并检查您是否获得与第 2 节中相同的 logits。然后使用填充标记将它们批处理在一起,并创建适当的注意力掩码。检查通过模型时是否获得相同的结果!
</Tip>
## 长序列 [[长序列]]
对于 Transformer 模型,我们可以传递给模型的序列长度是有限的。大多数模型最多能处理 512 或 1024 个 token 的序列,当要求处理更长的序列时会崩溃。此问题有两种解决方案:
* 使用支持的序列长度较长的模型。
* 截断序列。
不同的模型支持的序列长度各不相同,有些模型专门处理很长的序列。[Longformer](https://huggingface.co/transformers/model_doc/longformer.html) 是一个例子,另一个是 [LED](https://huggingface.co/transformers/model_doc/led.html)。如果您正在处理一项需要很长序列的任务,我们建议您查看这些模型。
否则,我们建议您通过指定 `max_sequence_length` 参数来截断序列:
```py
sequence = sequence[:max_sequence_length]
```
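下面是一个最简单的示意(仅作演示,序列与数值可按需替换):实际使用中,通常直接在调用分词器时通过 `truncation=True` 和 `max_length` 参数完成截断,而不需要手动切片:

```python
from transformers import AutoTokenizer

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

long_sequence = "I've been waiting for a HuggingFace course my whole life. " * 100
# 截断到模型支持的最大长度(DistilBERT 为 512)
inputs = tokenizer(long_sequence, truncation=True, max_length=512)
print(len(inputs["input_ids"]))  # 512
```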
| course/chapters/zh-CN/chapter2/5.mdx/0 | {
"file_path": "course/chapters/zh-CN/chapter2/5.mdx",
"repo_id": "course",
"token_count": 6034
} | 153 |
<FrameworkSwitchCourse {fw} />
<!-- DISABLE-FRONTMATTER-SECTIONS -->
# 章末小测试 [[章末小测验]]
<CourseFloatingBanner
chapter={4}
classNames="absolute z-10 right-0 top-0"
/>
让我们测试一下你在本章所学的知识!
### 1.Hub上的模型有什么限制?
<Question
choices={[
{
text: "模型来自🤗 Transformers 库。",
explain: "虽然 Hugging Face Hub 支持 🤗 Transformers 库中的模型,但它们并不是唯一的模型!"
},
{
text: "所有型号都有类似于🤗 Transformers 的接口。",
explain: "将模型上传到 Hugging Face Hub 时,没有设置接口要求。"
},
{
text: "没有限制。",
explain: "对! 在将模型上传到Hub时没有限制。",
correct: true
},
{
text: "在某种程度上与 NLP 相关的模型。",
explain: "对应用领域没有要求!"
}
]}
/>
### 2.如何管理Hub上的模型?
<Question
choices={[
{
text: "通过 GCP 帐户。",
explain: "不对!"
},
{
text: "通过点对点分布。",
explain: "不对!"
},
{
text: "通过 git 和 git-lfs。",
explain: "正确! Hub上的模型是简单的Git存储库,利用Git lfs处理大型文件。",
correct: true
}
]}
/>
### 3.你能使用Hugging Face Hub网页接口做什么?
<Question
choices={[
{
text: "分叉现有的存储库。",
explain: "在Hugging Face Hub上无法分叉存储库。"
},
{
text: "创建一个新的模型库。",
explain: "没错! 不过,这并不是你能做的全部。",
correct: true
},
{
text: "管理和编辑文件。",
explain: "正确! 不过,这并不是你能做的全部",
correct: true
},
{
text: "上传文件。",
explain: "对! 但这还不是全部。",
correct: true
},
{
text: "看看不同版本的差异。",
explain: "没错! 不过,这并不是你能做的全部。",
correct: true
}
]}
/>
### 4.模型卡是什么?
<Question
choices={[
{
text: "模型的粗略描述,因此不如模型和标记器文件重要。",
explain: "这确实是对模型的描述,但它是一个重要的部分: 如果它不完整或缺乏模型的实用性大幅减少。"
},
{
text: "一种确保再现性、可重用性和公平性的方法。",
explain: "正确!在模型卡片中共享正确的信息将帮助用户利用您的模型,并了解其局限性和偏见。",
correct: true
},
{
text: "一个 Python 文件,可以运行它来检索有关模型的信息。",
explain: "模型卡片是简单的 Markdown 文件。"
}
]}
/>
### 5.哪些🤗 Transformers 库的对象可以直接在 Hub 上通过 push_to_hub() 共享?
{#if fw === 'pt'}
<Question
choices={[
{
text: "一个标记器",
explain: "正确! 所有标记器都有push_to_hub方法,使用该方法将把所有标记器文件(词汇表、标记器的架构等)推送到给定的存储库。不过,这不是唯一正确的答案!",
correct: true
},
{
text: "模型配置",
explain: "对!所有模型配置都有 push _ to _ hub 方法,使用这个方法可以将它们推送到给定的存储库。你还有其他能共享的吗?",
correct: true
},
{
text: "一个模型",
explain: "正确! 所有模型都有 push_to_hub 方法,使用它会将它们及其配置文件推送到给定的存储库。不过,这并不是您可以共享的全部内容。",
correct: true
},
{
text: "Trainer",
explain: "没错————Trainer也实现了push _ to _ hub方法,并且使用它将模型、配置、标记器和模型卡草稿上传到给定的存储器。试试其他答案!",
correct: true
}
]}
/>
{:else}
<Question
choices={[
{
text: "一个标记器",
explain: "正确! 所有标记器都有push_to_hub方法,使用该方法将把所有标记器文件(词汇表、标记器的架构等)推送到给定的存储库。不过,这不是唯一正确的答案!",
correct: true
},
{
text: "模型配置",
explain: "对!所有模型配置都有 push _ to _ hub 方法,使用这个方法可以将它们推送到给定的存储库。你还有其他能共享的吗?",
correct: true
},
{
text: "一个模型",
explain: "正确! 所有模型都有 push_to_hub 方法,使用它会将它们及其配置文件推送到给定的存储库。不过,这并不是您可以共享的全部内容。",
correct: true
},
{
text: "以上都有专用的回调函数",
explain: "正确————在训练期间,PushToHubCallback会定期将所有这些对象发送到存储器。",
correct: true
}
]}
/>
{/if}
### 6.当使用 push_to_hub() 方法或 CLI 工具时,第一步是什么?
<Question
choices={[
{
text: "登录网站。",
explain: "这对你的本地机器没有帮助。"
},
{
text: "在终端中运行'huggingface-cli login'。",
explain: "正确————这将下载并缓存您的个人标记。",
correct: true
},
{
text: "在笔记本中运行'notebook _ login ()'。",
explain: "正确————这将显示一个小部件,让您进行身份验证。",
correct: true
},
]}
/>
### 7.您正在使用一个模型和一个标记器————如何将它们上传到 Hub?
<Question
choices={[
{
text: "通过直接在模型和标记器上调用 push _ to _ hub 方法。",
explain: "正确!",
correct: true
},
{
text: "在 Python 运行时中,将它们包装在huggingface _ hub实用程序中。",
explain: "模型和标记器已经受益于 < code > huggingface _ hub </code > 实用程序: 不需要额外的包装!"
},
{
text: "将它们保存到磁盘并调用 < code > transformers-cli upload-model </code >",
explain: "命令 < code > upload-model </code > 不存在。"
}
]}
/>
### 8.您可以使用'Repository'类执行哪些 git 操作?
<Question
choices={[
{
text: "提交。",
explain: "正确,< code >git _ commit () </ > 方法就是为此而存在的。",
correct: true
},
{
text: "拉一下",
explain: "这就是 < code > git _ pull () </code > 方法的目的。",
correct: true
},
{
text: "推一下",
explain: "方法 < code > git _ push () </code > 可以做到这一点。",
correct: true
},
{
text: "合并",
explain: "不,这个操作在这个 API 中是不可能的。"
}
]}
/>
| course/chapters/zh-CN/chapter4/6.mdx/0 | {
"file_path": "course/chapters/zh-CN/chapter4/6.mdx",
"repo_id": "course",
"token_count": 4152
} | 154 |
# WordPiece 标记化 [[WordPiece 标记化]]
<CourseFloatingBanner chapter={6}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section6.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section6.ipynb"},
]} />
WordPiece 是 Google 为预训练 BERT 而开发的标记化算法。此后,它在不少基于 BERT 的 Transformer 模型中得到重用,例如 DistilBERT、MobileBERT、Funnel Transformers 和 MPNET。它在训练方面与 BPE 非常相似,但实际标记化的方式不同。
<Youtube id="qpv6ms_t_1A"/>
<Tip>
💡 本节深入介绍 WordPiece,甚至展示完整的实现。如果您只想大致了解标记化算法,可以跳到最后。
</Tip>
## 训练算法 [[训练算法]]
<Tip warning={true}>
⚠️ Google 从未开源 WordPiece 训练算法的实现,因此以下是我们基于已发表文献的最佳猜测。它可能不是 100% 准确的。
</Tip>
与 BPE 一样,WordPiece 从一个小词汇表开始,包括模型使用的特殊标记和初始字母表。因为它通过添加前缀来识别子词 (如同 `##` 对于 BERT),每个单词最初是通过将该前缀添加到单词内的所有字符来拆分的。所以,例如 `"word"` ,像这样拆分:
```
w ##o ##r ##d
```
因此,初始字母表包含出现在单词开头的所有字符以及出现在单词内部的以 WordPiece 前缀开头的字符。
然后,再次像 BPE 一样,WordPiece 学习合并规则。主要区别在于选择要合并的对的方式。WordPiece 不是选择最频繁的对,而是使用以下公式计算每对的分数:
$$\mathrm{score} = (\mathrm{freq\_of\_pair}) / (\mathrm{freq\_of\_first\_element} \times \mathrm{freq\_of\_second\_element})$$
通过将配对的频率除以其每个部分的频率的乘积, 该算法优先合并单个部分在词汇表中频率较低的对。例如,它不一定会合并 `("un", "##able")` 即使这对在词汇表中出现的频率很高,因为 `"un"` 和 `"##able"` 很可能每个词都出现在很多其他词中并且出现频率很高。相比之下,像 `("hu", "##gging")` 可能会更快地合并 (假设 "hugging" 经常出现在词汇表中),因为 `"hu"` 和 `"##gging"` 这两个词单独出现地频率可能较低。
让我们看看我们在 BPE 训练示例中使用的相同词汇:
```
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
```
这里的拆分将是:
```
("h" "##u" "##g", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("h" "##u" "##g" "##s", 5)
```
所以最初的词汇将是 `["b", "h", "p", "##g", "##n", "##s", "##u"]` (如果我们暂时忘记特殊标记)。最频繁的一对是 `("##u", "##g")` (目前20次),但 `"##u"` 单独出现的频率非常高,所以它的分数不是最高的(它是 1 / 36)。所有带有 `"##u"` 的对实际上都有相同的分数(1 / 36),所以分数最高的对是 `("##g", "##s")` -- 唯一没有 `"##u"` 的对-- 1 / 20,所以学习的第一个合并是 `("##g", "##s") -> ("##gs")`。
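我们可以用几行代码来验证上面这些分数(这只是针对这个小例子的手动演算,并非完整实现):

```python
from collections import defaultdict

# 针对上面这个小语料的手动演算
word_freqs = {"hug": 10, "pug": 5, "pun": 12, "bun": 4, "hugs": 5}
splits = {
    "hug": ["h", "##u", "##g"],
    "pug": ["p", "##u", "##g"],
    "pun": ["p", "##u", "##n"],
    "bun": ["b", "##u", "##n"],
    "hugs": ["h", "##u", "##g", "##s"],
}

letter_freqs, pair_freqs = defaultdict(int), defaultdict(int)
for word, freq in word_freqs.items():
    split = splits[word]
    for i, piece in enumerate(split):
        letter_freqs[piece] += freq
        if i < len(split) - 1:
            pair_freqs[(piece, split[i + 1])] += freq


def score(pair):
    return pair_freqs[pair] / (letter_freqs[pair[0]] * letter_freqs[pair[1]])


print(score(("##u", "##g")))  # 0.0277...,即 1/36
print(score(("##g", "##s")))  # 0.05,即 1/20
```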
请注意,当我们合并时,我们删除了两个标记之间的 `##`,所以我们添加 `"##gs"` 到词汇表中,并在语料库的单词中应用该合并:
```
Vocabulary: ["b", "h", "p", "##g", "##n", "##s", "##u", "##gs"]
Corpus: ("h" "##u" "##g", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("h" "##u" "##gs", 5)
```
在这一点中, `"##u"` 是在所有可能的对中,因此它们最终都具有相同的分数。假设在这种情况下,第一对被合并, `("h", "##u") -> "hu"`。这使得我们:
```
Vocabulary: ["b", "h", "p", "##g", "##n", "##s", "##u", "##gs", "hu"]
Corpus: ("hu" "##g", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("hu" "##gs", 5)
```
然后下一个最高的分数由 `("hu", "##g")` 和 `("hu", "##gs")` 共享(1/15,与其他所有对的 1/21 相比),因此合并得分最高的第一对:
```
Vocabulary: ["b", "h", "p", "##g", "##n", "##s", "##u", "##gs", "hu", "hug"]
Corpus: ("hug", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("hu" "##gs", 5)
```
我们继续这样处理,直到达到我们所需的词汇量。
<Tip>
✏️ **现在轮到你了!** 下一个合并规则是什么?
</Tip>
## 标记化算法 [[标记化算法]]
WordPiece 和 BPE 中的标记化的不同在于 WordPiece 只保存最终词汇,而不是学习的合并规则。从要标记的单词开始,WordPiece 找到词汇表中最长的子词,然后对其进行拆分。例如,如果我们使用上面例子中学到的词汇,对于单词 `"hugs"`,词汇表中从头开始的最长子词是 `"hug"`,所以我们在那里拆分并得到 `["hug", "##s"]`。 然后我们继续使用词汇表中的 `"##s"`,因此 `"hugs"` 的标记化是 `["hug", "##s"]`.
使用 BPE, 我们将按顺序应用学习到的合并并将其标记为 `["hu", "##gs"]`,所以编码不同。
再举一个例子,让我们看看 `"bugs"` 将如何被标记化。 `"b"` 是从词汇表中单词开头开始的最长子词,所以我们在那里拆分并得到 `["b", "##ugs"]`。然后 `"##u"` 是词汇表中从 `"##ugs"` 开始的最长的子词,所以我们在那里拆分并得到 `["b", "##u, "##gs"]`。最后, `"##gs"` 在词汇表中,所以最后一个列表是 `"bugs"` 的标记化。
当分词达到无法在词汇表中找到子词的阶段时, 整个词被标记为未知 -- 例如, `"mug"` 将被标记为 `["[UNK]"]`,就像 `"bum"` (即使我们可以以 `"b"` 和 `"##u"` 开始, `"##m"` 不在词汇表中,由此产生的标记将只是 `["[UNK]"]`, 不是 `["b", "##u", "[UNK]"]`)。这是与 BPE 的另一个区别,BPE 只会将不在词汇表中的单个字符分类为未知。
<Tip>
✏️ **现在轮到你了!** `"pugs"` 将被如何标记?
</Tip>
## 实现 WordPiece [[实现 WordPiece]]
现在让我们看一下 WordPiece 算法的实现。与 BPE 一样,这只是教学,你将无法在大型语料库中使用它。
我们将使用与 BPE 示例中相同的语料库:
```python
corpus = [
"This is the Hugging Face Course.",
"This chapter is about tokenization.",
"This section shows several tokenizer algorithms.",
"Hopefully, you will be able to understand how they are trained and generate tokens.",
]
```
首先,我们需要将语料库预先标记为单词。由于我们正在复制 WordPiece 标记器 (如 BERT),因此我们将使用 `bert-base-cased` 标记器用于预标记化:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
```
然后我们在进行预标记化时计算语料库中每个单词的频率:
```python
from collections import defaultdict
word_freqs = defaultdict(int)
for text in corpus:
words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text)
new_words = [word for word, offset in words_with_offsets]
for word in new_words:
word_freqs[word] += 1
word_freqs
```
```python out
defaultdict(
int, {'This': 3, 'is': 2, 'the': 1, 'Hugging': 1, 'Face': 1, 'Course': 1, '.': 4, 'chapter': 1, 'about': 1,
'tokenization': 1, 'section': 1, 'shows': 1, 'several': 1, 'tokenizer': 1, 'algorithms': 1, 'Hopefully': 1,
',': 1, 'you': 1, 'will': 1, 'be': 1, 'able': 1, 'to': 1, 'understand': 1, 'how': 1, 'they': 1, 'are': 1,
'trained': 1, 'and': 1, 'generate': 1, 'tokens': 1})
```
正如我们之前看到的,字母表是由单词的所有第一个字母组成的唯一集合,以及出现在前缀为 `##` 的其他字母:
```python
alphabet = []
for word in word_freqs.keys():
if word[0] not in alphabet:
alphabet.append(word[0])
for letter in word[1:]:
if f"##{letter}" not in alphabet:
alphabet.append(f"##{letter}")
alphabet.sort()
alphabet
print(alphabet)
```
```python out
['##a', '##b', '##c', '##d', '##e', '##f', '##g', '##h', '##i', '##k', '##l', '##m', '##n', '##o', '##p', '##r', '##s',
'##t', '##u', '##v', '##w', '##y', '##z', ',', '.', 'C', 'F', 'H', 'T', 'a', 'b', 'c', 'g', 'h', 'i', 's', 't', 'u',
'w', 'y']
```
我们还在该词汇表的开头添加了模型使用的特殊标记。在使用 BERT 的情况下,它是列表 `["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]`:
```python
vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] + alphabet.copy()
```
接下来我们需要拆分每个单词, 所有不是第一个字母的字母都以 `##` 为前缀:
```python
splits = {
word: [c if i == 0 else f"##{c}" for i, c in enumerate(word)]
for word in word_freqs.keys()
}
```
现在我们已经准备好训练了,让我们编写一个函数来计算每对的分数。我们需要在训练的每个步骤中使用它:
```python
def compute_pair_scores(splits):
letter_freqs = defaultdict(int)
pair_freqs = defaultdict(int)
for word, freq in word_freqs.items():
split = splits[word]
if len(split) == 1:
letter_freqs[split[0]] += freq
continue
for i in range(len(split) - 1):
pair = (split[i], split[i + 1])
letter_freqs[split[i]] += freq
pair_freqs[pair] += freq
letter_freqs[split[-1]] += freq
scores = {
pair: freq / (letter_freqs[pair[0]] * letter_freqs[pair[1]])
for pair, freq in pair_freqs.items()
}
return scores
```
让我们来看看这个字典在初始拆分后的一部分:
```python
pair_scores = compute_pair_scores(splits)
for i, key in enumerate(pair_scores.keys()):
print(f"{key}: {pair_scores[key]}")
if i >= 5:
break
```
```python out
('T', '##h'): 0.125
('##h', '##i'): 0.03409090909090909
('##i', '##s'): 0.02727272727272727
('i', '##s'): 0.1
('t', '##h'): 0.03571428571428571
('##h', '##e'): 0.011904761904761904
```
现在,找到得分最高的对只需要一个快速循环:
```python
best_pair = ""
max_score = None
for pair, score in pair_scores.items():
if max_score is None or max_score < score:
best_pair = pair
max_score = score
print(best_pair, max_score)
```
```python out
('a', '##b') 0.2
```
所以第一个要学习的合并是 `('a', '##b') -> 'ab'`, 并且我们添加 `'ab'` 到词汇表中:
```python
vocab.append("ab")
```
要继续接下来的步骤,我们需要在我们的 `拆分` 字典中应用该合并。让我们为此编写另一个函数:
```python
def merge_pair(a, b, splits):
for word in word_freqs:
split = splits[word]
if len(split) == 1:
continue
i = 0
while i < len(split) - 1:
if split[i] == a and split[i + 1] == b:
merge = a + b[2:] if b.startswith("##") else a + b
split = split[:i] + [merge] + split[i + 2 :]
else:
i += 1
splits[word] = split
return splits
```
我们可以看看第一次合并的结果:
```py
splits = merge_pair("a", "##b", splits)
splits["about"]
```
```python out
['ab', '##o', '##u', '##t']
```
现在我们有了循环所需的一切,直到我们学会了我们想要的所有合并。我们的目标词汇量为70:
```python
vocab_size = 70
while len(vocab) < vocab_size:
scores = compute_pair_scores(splits)
best_pair, max_score = "", None
for pair, score in scores.items():
if max_score is None or max_score < score:
best_pair = pair
max_score = score
splits = merge_pair(*best_pair, splits)
new_token = (
best_pair[0] + best_pair[1][2:]
if best_pair[1].startswith("##")
else best_pair[0] + best_pair[1]
)
vocab.append(new_token)
```
然后我们可以查看生成的词汇表:
```py
print(vocab)
```
```python out
['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]', '##a', '##b', '##c', '##d', '##e', '##f', '##g', '##h', '##i', '##k',
'##l', '##m', '##n', '##o', '##p', '##r', '##s', '##t', '##u', '##v', '##w', '##y', '##z', ',', '.', 'C', 'F', 'H',
'T', 'a', 'b', 'c', 'g', 'h', 'i', 's', 't', 'u', 'w', 'y', 'ab','##fu', 'Fa', 'Fac', '##ct', '##ful', '##full', '##fully',
'Th', 'ch', '##hm', 'cha', 'chap', 'chapt', '##thm', 'Hu', 'Hug', 'Hugg', 'sh', 'th', 'is', '##thms', '##za', '##zat',
'##ut']
```
正如我们所看到的,与 BPE 相比,这个标记器将单词的一部分作为标记学习得更快一些。
<Tip>
💡 在同一语料库上使用 `train_new_from_iterator()` 不会产生完全相同的词汇表。这是因为 🤗 Tokenizers 库没有为训练实现 WordPiece(因为我们不完全确定它的内部结构),而是使用 BPE。
</Tip>
为了对新文本进行分词,我们对其进行预分词、拆分,然后对每个单词应用分词算法。也就是说,我们从第一个词的开头寻找最大的子词并将其拆分,然后我们在第二部分重复这个过程,对于该词的其余部分和文本中的以下词,依此类推:
```python
def encode_word(word):
tokens = []
while len(word) > 0:
i = len(word)
while i > 0 and word[:i] not in vocab:
i -= 1
if i == 0:
return ["[UNK]"]
tokens.append(word[:i])
word = word[i:]
if len(word) > 0:
word = f"##{word}"
return tokens
```
让我们用词汇表中的一个单词和另一个不在词汇表中的单词进行测试:
```python
print(encode_word("Hugging"))
print(encode_word("HOgging"))
```
```python out
['Hugg', '##i', '##n', '##g']
['[UNK]']
```
现在,让我们编写一个标记文本的函数:
```python
def tokenize(text):
pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text)
pre_tokenized_text = [word for word, offset in pre_tokenize_result]
encoded_words = [encode_word(word) for word in pre_tokenized_text]
return sum(encoded_words, [])
```
我们可以在任何文本上尝试:
```python
tokenize("This is the Hugging Face course!")
```
```python out
['Th', '##i', '##s', 'is', 'th', '##e', 'Hugg', '##i', '##n', '##g', 'Fac', '##e', 'c', '##o', '##u', '##r', '##s',
'##e', '[UNK]']
```
这就是 WordPiece 算法的全部内容!现在让我们来看看 Unigram。
| course/chapters/zh-CN/chapter6/6.mdx/0 | {
"file_path": "course/chapters/zh-CN/chapter6/6.mdx",
"repo_id": "course",
"token_count": 8105
} | 155 |
<FrameworkSwitchCourse {fw} />
# 调试训练管道 [[调试训练管道]]
<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/chapter8/section4.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/chapter8/section4.ipynb"},
]} />
你已经编写了一个漂亮的脚本来训练或微调给定任务的模型,尽职尽责地遵循 [Chapter 7](/course/chapter7) 中的建议。 但是当你启动命令 `trainer.train()` 时,可怕的事情发生了:你得到一个错误😱! 或者更糟糕的是,一切似乎都很好,训练运行没有错误,但生成的模型很糟糕。 在本节中,我们将向您展示如何调试此类问题。
## 调试训练管道 [[调试训练管道]]
<Youtube id="L-WSwUWde1U"/>
当您在 `trainer.train()` 中遇到错误时,它可能来自多个来源,因为 `Trainer` 通常会将很多东西放在一起组合运行。 它将datasets转换为dataloaders,因此问题可能出在datasets中,或者在尝试将datasets的元素一起批处理时出现问题。 然后它需要准备一批数据并将其提供给模型,因此问题可能出在模型代码中。 之后,它会计算梯度并执行优化器,因此问题也可能出在您的优化器中。 即使训练一切顺利,如果您的评估指标有问题,评估期间仍然可能出现问题。
调试 `trainer.train()` 中出现的错误的最佳方法是手动检查整个管道,看看哪里出了问题。 错误通常很容易解决。
为了证明这一点,我们将使用以下脚本(尝试)在 [MNLI 数据集](https://huggingface.co/datasets/glue)上微调 DistilBERT 模型:
```py
from datasets import load_dataset
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
TrainingArguments,
Trainer,
)
import evaluate

raw_datasets = load_dataset("glue", "mnli")
model_checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
def preprocess_function(examples):
return tokenizer(examples["premise"], examples["hypothesis"], truncation=True)
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
args = TrainingArguments(
f"distilbert-finetuned-mnli",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
)
metric = evaluate.load("glue", "mnli")
def compute_metrics(eval_pred):
predictions, labels = eval_pred
return metric.compute(predictions=predictions, references=labels)
trainer = Trainer(
model,
args,
train_dataset=raw_datasets["train"],
eval_dataset=raw_datasets["validation_matched"],
compute_metrics=compute_metrics,
)
trainer.train()
```
如果你尝试执行它,你会遇到一个相当神秘的错误:
```python out
'ValueError: You have to specify either input_ids or inputs_embeds'
```
### 检查数据 [[检查数据]]
这是不言而喻的,如果你的数据被破坏,“Trainer”将无法形成批次,更不用说训练你的模型了。 所以首先,你需要看看你的训练集中有什么。
为了避免花费无数小时试图检查和修复不是错误来源的东西,我们建议您使用 `trainer.train_dataset` 进行检查。 所以让我们在这里这样做:
```py
trainer.train_dataset[0]
```
```python out
{'hypothesis': 'Product and geography are what make cream skimming work. ',
'idx': 0,
'label': 1,
'premise': 'Conceptually cream skimming has two basic dimensions - product and geography.'}
```
你注意到有什么不对吗? 与缺少有关 `input_ids` 的错误消息相结合,应该让您意识到数据集里是文本,而不是模型可以理解的数字。 在这个例子,输出的原始错误信息非常具有误导性,因为 `Trainer` 会自动删除与模型签名不匹配的列(即模型预期的参数)。 这意味着在这里,除了标签之外的所有东西都被丢弃了。 因此,创建批次然后将它们发送到模型没有问题,而模型又抱怨它没有收到正确的输入。
为什么没有处理数据生成标记呢? 我们确实在数据集上使用了“Dataset.map()”方法来对每个样本应用标记器。 但是如果你仔细看代码,你会发现我们在将训练和评估集传递给`Trainer`时犯了一个错误。 我们在这里没有使用 `tokenized_datasets`,而是使用了 `raw_datasets` 🤦。 所以让我们解决这个问题!
```py
from datasets import load_dataset
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
TrainingArguments,
Trainer,
)
import evaluate

raw_datasets = load_dataset("glue", "mnli")
model_checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
def preprocess_function(examples):
return tokenizer(examples["premise"], examples["hypothesis"], truncation=True)
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
args = TrainingArguments(
f"distilbert-finetuned-mnli",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
)
metric = evaluate.load("glue", "mnli")
def compute_metrics(eval_pred):
predictions, labels = eval_pred
return metric.compute(predictions=predictions, references=labels)
trainer = Trainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation_matched"],
compute_metrics=compute_metrics,
)
trainer.train()
```
这个新代码现在会给出一个不同的错误:
```python out
'ValueError: expected sequence of length 43 at dim 1 (got 37)'
```
查看traceback,我们可以看到错误发生在数据整理步骤中:
```python out
~/git/transformers/src/transformers/data/data_collator.py in torch_default_data_collator(features)
105 batch[k] = torch.stack([f[k] for f in features])
106 else:
--> 107 batch[k] = torch.tensor([f[k] for f in features])
108
109 return batch
```
所以,我们应该去研究一下这一步。然而,在这样做之前,让我们先完成对数据的检查,确定它 100% 正确。
在调试训练问题时,您应该始终做的一件事是查看模型的解码输入。我们无法理解直接提供给它的数字,所以我们应该看看这些数字代表什么。例如,在计算机视觉中,这意味着查看您传递的图片像素的解码,在语音中意味着解码后的音频样本,对于我们的 NLP 示例,这意味着使用我们的标记器解码的输入:
```py
tokenizer.decode(trainer.train_dataset[0]["input_ids"])
```
```python out
'[CLS] conceptually cream skimming has two basic dimensions - product and geography. [SEP] product and geography are what make cream skimming work. [SEP]'
```
所以这似乎是正确的。 您应该对输入中的所有键执行此操作:
```py
trainer.train_dataset[0].keys()
```
```python out
dict_keys(['attention_mask', 'hypothesis', 'idx', 'input_ids', 'label', 'premise'])
```
请注意,与模型接受的输入不对应的键将被自动丢弃,因此这里我们将仅保留 `input_ids`、`attention_mask` 和 `label`(将重命名为 `labels`)。 要仔细检查模型输入的列,您可以打印模型的类,然后查看其文档:
```py
type(trainer.model)
```
```python out
transformers.models.distilbert.modeling_distilbert.DistilBertForSequenceClassification
```
所以在我们的例子中,我们可以在[这个页面](https://huggingface.co/transformers/model_doc/distilbert.html#distilbertforsequenceclassification)上检查模型接受的参数。`Trainer` 也会记录它丢弃的列。
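如果不想翻文档,也可以直接用 Python 自带的 `inspect` 模块查看模型 `forward` 方法接受的参数(这只是一个辅助小技巧):

```python
import inspect

# 列出模型 forward 方法接受的所有参数名
print(list(inspect.signature(trainer.model.forward).parameters.keys()))
```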
我们通过解码检查了输入 ID 是否正确。 接下来是检查 `attention_mask`:
```py
trainer.train_dataset[0]["attention_mask"]
```
```python out
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```
由于我们没有在预处理中应用填充,这看起来非常自然。 为确保该注意掩码没有问题,让我们检查它与输入 ID 的长度是否相同:
```py
len(trainer.train_dataset[0]["attention_mask"]) == len(
trainer.train_dataset[0]["input_ids"]
)
```
```python out
True
```
那挺好的! 最后,让我们检查一下我们的标签:
```py
trainer.train_dataset[0]["label"]
```
```python out
1
```
与输入 ID 一样,这是一个本身并没有真正意义的数字。 正如我们之前看到的,整数和标签名称之间的映射存储在数据集相应 *feature* 的 `names` 属性中:
```py
trainer.train_dataset.features["label"].names
```
```python out
['entailment', 'neutral', 'contradiction']
```
所以`1`表示`neutral`,表示我们上面看到的两句话并不矛盾,也没有包含关系。 这似乎是正确的!
我们这里没有令牌类型 ID,因为 DistilBERT 不需要它们; 如果您的模型中有一些,您还应该确保它们正确匹配输入中第一句和第二句的位置。
<Tip>
✏️ **轮到你了!** 检查训练数据集的第二个元素是否正确。
</Tip>
我们在这里只对训练集进行检查,但您当然应该以同样的方式仔细检查验证集和测试集。
现在我们知道我们的数据集看起来不错,是时候检查训练管道的下一步了。
### 从 datasets 到 dataloaders [[从 datasets 到 dataloaders]]
训练管道中可能出错的下一件事是当“Trainer”尝试从训练或验证集形成批次时。 一旦你确定 `Trainer` 的数据集是正确的,你可以尝试通过执行以下操作手动形成一个批次(可以将 `train` 替换为 `eval` 用于验证数据加载器):
```py
for batch in trainer.get_train_dataloader():
break
```
此代码创建训练数据加载器,然后对其进行迭代,在第一次迭代时停止。 如果代码执行没有错误,那么您就有了可以检查的第一个训练批次,如果代码出错,您可以确定问题出在数据加载器中,如下所示:
```python out
~/git/transformers/src/transformers/data/data_collator.py in torch_default_data_collator(features)
105 batch[k] = torch.stack([f[k] for f in features])
106 else:
--> 107 batch[k] = torch.tensor([f[k] for f in features])
108
109 return batch
ValueError: expected sequence of length 45 at dim 1 (got 76)
```
检查 traceback 的最后一个堆栈的输出应该足以给你一个线索,但让我们做更多的挖掘。批处理创建过程中的大多数问题是由于将示例整理到单个批处理中而出现的,因此在有疑问时首先要检查的是您的 DataLoader 正在使用什么 collate_fn:
```py
data_collator = trainer.get_train_dataloader().collate_fn
data_collator
```
```python out
<function transformers.data.data_collator.default_data_collator(features: List[InputDataClass], return_tensors='pt') -> Dict[str, Any]>
```
所以,目前使用的是 `default_data_collator`,但这不是我们在这种情况下想要的。 我们希望将示例填充到批处理中最长的句子,这是由 `DataCollatorWithPadding` 整理器完成的。 而这个数据收集器应该是默认被 `Trainer` 使用的,为什么这里没有使用呢?
答案是因为我们没有将 `tokenizer` 传递给 `Trainer`,所以它无法创建我们想要的 `DataCollatorWithPadding`。 在实践中,您应该明确地传递您想要使用的数据整理器,以确保避免这些类型的错误。 让我们调整我们的代码来做到这一点:
```py
from datasets import load_dataset
import evaluate
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
DataCollatorWithPadding,
TrainingArguments,
Trainer,
)
raw_datasets = load_dataset("glue", "mnli")
model_checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
def preprocess_function(examples):
return tokenizer(examples["premise"], examples["hypothesis"], truncation=True)
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
args = TrainingArguments(
f"distilbert-finetuned-mnli",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
)
metric = evaluate.load("glue", "mnli")
def compute_metrics(eval_pred):
predictions, labels = eval_pred
return metric.compute(predictions=predictions, references=labels)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
trainer = Trainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation_matched"],
compute_metrics=compute_metrics,
data_collator=data_collator,
tokenizer=tokenizer,
)
trainer.train()
```
好消息? 我们没有得到与以前相同的错误,这绝对是进步。 坏消息? 我们得到了一个臭名昭著的 CUDA 错误:
```python out
RuntimeError: CUDA error: CUBLAS_STATUS_ALLOC_FAILED when calling `cublasCreate(handle)`
```
这很糟糕,因为 CUDA 错误通常很难调试。 我们稍后会看到如何解决这个问题,但首先让我们完成对批处理创建的分析。
如果您确定您的数据整理器是正确的,则应尝试将其应用于数据集的几个样本:
```py
data_collator = trainer.get_train_dataloader().collate_fn
batch = data_collator([trainer.train_dataset[i] for i in range(4)])
```
此代码将失败,因为 `train_dataset` 包含字符串列,`Trainer` 通常会删除这些列。 您可以手动删除它们,或者如果您想准确地修改 `Trainer` 在幕后所做的事情,您可以调用私有的 `Trainer._remove_unused_columns()` 方法来执行此操作:
```py
data_collator = trainer.get_train_dataloader().collate_fn
actual_train_set = trainer._remove_unused_columns(trainer.train_dataset)
batch = data_collator([actual_train_set[i] for i in range(4)])
```
如果错误仍然存在,您应该能够手动调试数据整理器内部以确定具体的问题。
现在我们已经调试了批处理创建过程,是时候将数据传递给模型了!
### 检查模型 [[检查模型]]
您应该能够通过执行以下命令来获得一个批次的数据:
```py
for batch in trainer.get_train_dataloader():
break
```
如果您在 notebook 中运行此代码,您可能会收到与我们之前看到的类似的 CUDA 错误,在这种情况下,您需要重新启动 notebook 并重新执行最后一个片段,而不运行 `trainer.train()` 行。这是关于 CUDA 错误的第二个最烦人的事情:它们会不可恢复地破坏您的内核。而它们最烦人的地方在于很难调试。
这是为什么?它与 GPU 的工作方式有关。它们在并行执行大量操作方面非常有效,但缺点是当其中一条指令导致错误时,您不会立即知道。只有当程序在 GPU 上调用多个进程的同步处理时,它才会意识到出现问题,因此错误实际上是在与创建它的原因无关的地方引发的。例如,如果我们查看之前的trackback,错误是在向后传递期间引发的,但我们会在一分钟内看到它实际上源于向前传递中的某些东西。
那么我们如何调试这些错误呢?答案很简单:我们没有。除非您的 CUDA 错误是内存不足错误(这意味着您的 GPU 中没有足够的内存),否则您应该始终返回 CPU 进行调试。
为此,我们只需将模型放回 CPU 上并在我们的一批数据中调用它——“DataLoader”返回的那批数据尚未移动到 GPU:
```python
outputs = trainer.model.cpu()(**batch)
```
```python out
~/.pyenv/versions/3.7.9/envs/base/lib/python3.7/site-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction)
2386 )
2387 if dim == 2:
-> 2388 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2389 elif dim == 4:
2390 ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
IndexError: Target 2 is out of bounds.
```
所以,思路越来越清晰了。 我们现在在损失计算中没有出现 CUDA 错误,而是有一个“IndexError”(因此与我们之前所说的反向传播无关)。 更准确地说,我们可以看到是Target 2 造成了错误,所以这是检查模型标签数量的好时机:
```python
trainer.model.config.num_labels
```
```python out
2
```
有两个标签,只允许 0 和 1 作为目标,但是根据错误信息我们得到一个 2。得到一个 2 实际上是正常的:如果我们记得我们之前提取的标签名称,有三个,所以我们有索引 0 , 1 和 2 在我们的数据集中。 问题是我们没有告诉我们的模型,它应该创建三个标签。 所以让我们解决这个问题!
```py
from datasets import load_dataset
import evaluate
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
DataCollatorWithPadding,
TrainingArguments,
Trainer,
)
raw_datasets = load_dataset("glue", "mnli")
model_checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
def preprocess_function(examples):
return tokenizer(examples["premise"], examples["hypothesis"], truncation=True)
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3)
args = TrainingArguments(
f"distilbert-finetuned-mnli",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
)
metric = evaluate.load("glue", "mnli")
def compute_metrics(eval_pred):
predictions, labels = eval_pred
return metric.compute(predictions=predictions, references=labels)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
trainer = Trainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation_matched"],
compute_metrics=compute_metrics,
data_collator=data_collator,
tokenizer=tokenizer,
)
```
我们还没有包含 `trainer.train()` 行,以便花时间检查一切是否正常。 如果我们请求一个批次的数据并将其传递给我们的模型,它现在可以正常工作了!
```py
for batch in trainer.get_train_dataloader():
break
outputs = trainer.model.cpu()(**batch)
```
下一步是回到 GPU 并检查一切是否仍然有效:
```py
import torch
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
batch = {k: v.to(device) for k, v in batch.items()}
outputs = trainer.model.to(device)(**batch)
```
如果仍然出现错误,请确保重新启动notebook并仅执行脚本的最后一个版本。
### 执行一个优化器步骤 [[执行一个优化器步骤]]
现在我们知道我们可以构建实际通过模型检查的成批次的数据,我们已经为训练管道的下一步做好准备:计算梯度并执行优化步骤。
第一部分只是在 loss 上调用 `backward()` 方法:
```py
loss = outputs.loss
loss.backward()
```
在这个阶段很少出现错误,但如果确实出现错误,请返回 CPU 以获取有用的错误消息。
要执行优化步骤,我们只需要创建 `optimizer` 并调用它的 `step()` 方法:
```py
trainer.create_optimizer()
trainer.optimizer.step()
```
同样,如果您在 `Trainer` 中使用默认优化器,则在此阶段您不应该收到错误,但如果您有自定义优化器,则可能会出现一些问题需要在这里调试。 如果您在此阶段遇到奇怪的 CUDA 错误,请不要忘记返回 CPU。 说到 CUDA 错误,前面我们提到了一个特殊情况。 现在让我们来看看。
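如果你想用自定义优化器做同样的单步检查,下面是一个简单的示意(并非原文的一部分),假设沿用上文的 `trainer` 和 `batch`,并以 PyTorch 自带的 `AdamW` 为例:

```py
from torch.optim import AdamW

# 用自定义的 AdamW 代替 trainer.create_optimizer(),在单个批次上手动验证一步优化
optimizer = AdamW(trainer.model.parameters(), lr=5e-5)
optimizer.zero_grad()  # 先清掉之前可能累积的梯度

outputs = trainer.model(**batch)
outputs.loss.backward()
optimizer.step()
```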
### 处理 CUDA out-of-memory错误 [[处理 CUDA out-of-memory错误]]
每当您收到以`RuntimeError: CUDA out of memory`开头的错误消息时,这表明您的 GPU 内存不足。 这与您的代码没有直接关联,并且它可能发生在运行良好的代码中。 此错误意味着您试图在 GPU 的内部存储器中放入太多东西,这导致了错误。 与其他 CUDA 错误一样,您需要重新启动内核才能再次运行训练。
要解决这个问题,您只需要使用更少的 GPU 空间——这往往说起来容易做起来难。 首先,确保您没有同时在 GPU 上运行两个模型(当然,除非您的问题需要这样做)。 然后,您可能应该减少batch的大小,因为它直接影响模型的所有中间输出的大小及其梯度。 如果问题仍然存在,请考虑使用较小版本的模型。
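举个例子,下面是一个简单的示意(并非原文的一部分),展示如何在 `TrainingArguments` 中减小 batch size,并用梯度累积保持等效的总 batch size;`fp16=True` 需要支持混合精度的 GPU:

```py
from transformers import TrainingArguments

args = TrainingArguments(
    "distilbert-finetuned-mnli",
    per_device_train_batch_size=4,   # 更小的 batch,直接减少激活值与梯度占用的显存
    gradient_accumulation_steps=4,   # 每 4 步累积一次梯度,等效 batch size 仍为 16
    fp16=True,                       # 混合精度进一步降低显存占用(需要支持的 GPU)
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=2e-5,
    num_train_epochs=3,
    weight_decay=0.01,
)
```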
<Tip>
在课程的下一部分中,我们将介绍更先进的技术,这些技术可以帮助您减少内存占用并让您微调最大的模型。
</Tip>
### 评估模型 [[评估模型]]
现在我们已经解决了代码的所有问题,一切都很完美,训练应该可以顺利进行,对吧? 没那么快! 如果你运行 `trainer.train()` 命令,一开始一切看起来都不错,但过一会儿你会得到以下信息:
```py
# This will take a long time and error out, so you shouldn't run this cell
trainer.train()
```
```python out
TypeError: only size-1 arrays can be converted to Python scalars
```
您将意识到此错误出现在评估阶段,因此这是我们需要调试的最后一件事。
您可以像这样在训练中独立运行`Trainer`的评估循环:
```py
trainer.evaluate()
```
```python out
TypeError: only size-1 arrays can be converted to Python scalars
```
<Tip>
💡 您应该始终确保在启动 `trainer.train()` 之前 `trainer.evaluate()`是可以运行的,以避免在遇到错误之前浪费大量计算资源。
</Tip>
在尝试调试评估循环中的问题之前,您应该首先确保您已经查看了数据,能够正确地形成批处理,并且可以在其上运行您的模型。 我们已经完成了所有这些步骤,因此可以执行以下代码而不会出错:
```py
for batch in trainer.get_eval_dataloader():
break
batch = {k: v.to(device) for k, v in batch.items()}
with torch.no_grad():
outputs = trainer.model(**batch)
```
稍等一会儿,在评估阶段快结束时错误就会出现。如果我们查看 traceback,我们会看到:
```python trace
~/git/datasets/src/datasets/metric.py in add_batch(self, predictions, references)
431 """
432 batch = {"predictions": predictions, "references": references}
--> 433 batch = self.info.features.encode_batch(batch)
434 if self.writer is None:
435 self._init_writer()
```
这告诉我们错误源自 `datasets/metric.py` 模块——所以这是我们的 `compute_metrics()` 函数的问题。 它需要一个带有 logits 和标签的元组作为 NumPy 数组,所以让我们尝试输入它:
```py
predictions = outputs.logits.cpu().numpy()
labels = batch["labels"].cpu().numpy()
compute_metrics((predictions, labels))
```
```python out
TypeError: only size-1 arrays can be converted to Python scalars
```
我们得到了同样的错误,所以问题肯定出在那个函数上。如果我们回顾它的代码,会发现它只是把 `predictions` 和 `labels` 转发给 `metric.compute()`。那么这个方法本身有问题吗?其实不然。让我们快速看一下形状:
```py
predictions.shape, labels.shape
```
```python out
((8, 3), (8,))
```
我们的预测仍然是 logits,而不是实际的预测,这就是metrics返回这个(有点模糊)错误的原因。 修复很简单; 我们只需要在 `compute_metrics()` 函数中添加一个 argmax:
```py
import numpy as np
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return metric.compute(predictions=predictions, references=labels)
compute_metrics((predictions, labels))
```
```python out
{'accuracy': 0.625}
```
现在我们的错误已修复! 这是最后一个,所以我们的脚本现在将正确训练模型。
作为参考,这里是完全修正好的脚本:
```py
import numpy as np
from datasets import load_dataset
import evaluate
from transformers import (
AutoTokenizer,
AutoModelForSequenceClassification,
DataCollatorWithPadding,
TrainingArguments,
Trainer,
)
raw_datasets = load_dataset("glue", "mnli")
model_checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
def preprocess_function(examples):
return tokenizer(examples["premise"], examples["hypothesis"], truncation=True)
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3)
args = TrainingArguments(
f"distilbert-finetuned-mnli",
evaluation_strategy="epoch",
save_strategy="epoch",
learning_rate=2e-5,
num_train_epochs=3,
weight_decay=0.01,
)
metric = evaluate.load("glue", "mnli")
def compute_metrics(eval_pred):
predictions, labels = eval_pred
predictions = np.argmax(predictions, axis=1)
return metric.compute(predictions=predictions, references=labels)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
trainer = Trainer(
model,
args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation_matched"],
compute_metrics=compute_metrics,
data_collator=data_collator,
tokenizer=tokenizer,
)
trainer.train()
```
在这种情况下,如果没有更多错误,我们的脚本将微调一个应该给出合理结果的模型。 但是,如果训练没有任何错误,而训练出来的模型根本表现不佳,我们该怎么办? 这是机器学习中最难的部分,我们将向您展示一些可以提供帮助的技术。
<Tip>
💡 如果您使用手动训练循环,则相同的步骤也适用于调试训练管道,而且更容易将它们分开。 但是,请确保您没有忘记正确位置的 `model.eval()` 或 `model.train()`,或者每个步骤中的 `zero_grad()`!
</Tip>
## 在训练期间调试静默(没有任何错误提示)错误 [[在训练期间调试静默(没有任何错误提示)错误]]
我们可以做些什么来调试一个没有错误地完成但没有得到好的结果的训练? 我们会在这里给你一些提示,但请注意,这种调试是机器学习中最难的部分,并且没有神奇的答案。
### 检查您的数据(再次!) [[检查您的数据(再次!)]]
只有在理论上可以从您的数据中学到任何东西时,您的模型才会学到一些东西。 如果存在损坏数据的错误或标签是随机属性的,那么您很可能不会在数据集上获得任何知识。 因此,始终首先仔细检查您的解码输入和标签,然后问自己以下问题:
- 解码后的数据是否可以理解?
- 你认同这些标签吗?
- 有没有一个标签比其他标签更常见?
- 如果模型预测随机的答案/总是相同的答案,那么loss/评估指标应该是多少?
<Tip warning={true}>
⚠️ 如果您正在进行分布式训练,请在每个过程中打印数据集的样本,并三次检查您是否得到相同的结果。 一个常见的错误是在数据创建中有一些随机性来源,这使得每个进程都有不同版本的数据集。
</Tip>
查看您的数据后,查看模型的一些预测并对其进行解码。 如果模型总是预测同样的事情,那可能是因为你的数据集偏向一个类别(针对分类问题); 过采样稀有类等技术可能会有所帮助。
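下面是一个针对手动训练循环的简单示意(并非原文的一部分),用 PyTorch 的 `WeightedRandomSampler` 对稀有类别进行过采样;这里假设沿用上文的 `tokenized_datasets`、`actual_train_set` 和 `data_collator`:

```py
from collections import Counter
from torch.utils.data import DataLoader, WeightedRandomSampler

labels = tokenized_datasets["train"]["label"]
counts = Counter(labels)
# 每个样本的权重与其类别出现的频率成反比,稀有类别因此会被更频繁地采样到
sample_weights = [1.0 / counts[label] for label in labels]

sampler = WeightedRandomSampler(sample_weights, num_samples=len(sample_weights), replacement=True)
train_dataloader = DataLoader(
    actual_train_set, batch_size=16, sampler=sampler, collate_fn=data_collator
)
```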
如果您在初始模型上获得的loss/评估指标与您期望的随机预测的loss/评估指标非常不同,请仔细检查您的loss或评估指标的计算方式,因为那里可能存在错误。 如果您使用最后添加的多个loss,请确保它们具有相同的规模。
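作为参考,下面是一个简单的示意(并非原文的一部分),计算多分类问题中均匀随机预测对应的交叉熵损失,可以用它作为初始 loss 的合理性检查:

```python
import math

num_labels = 3  # MNLI 有三个类别
# 对 k 分类问题,均匀随机预测的交叉熵损失约为 ln(k)
random_baseline_loss = -math.log(1 / num_labels)
print(random_baseline_loss)  # 约 1.0986
```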
当您确定您的数据是完美的时,您可以通过一个简单的测试来查看模型是否能够对其进行训练。
### 在一批上过度拟合你的模型 [[在一批上过度拟合你的模型]]
过度拟合通常是我们在训练时尽量避免的事情,因为这意味着模型没有学习识别我们想要的一般特征,而只是记住了训练样本。 在这种情况下,一遍又一遍地尝试在一个批次上训练您的模型是一个很好的测试,可以检查您的问题是否可以通过您尝试训练的模型来解决。 它还将帮助您查看您的初始学习率是否太高。
一旦你定义了你的 `Trainer` 之后,这样做真的很容易; 只需获取一批训练数据,然后仅使用该批次运行一个小型手动训练循环,大约 20 步:
```py
for batch in trainer.get_train_dataloader():
break
batch = {k: v.to(device) for k, v in batch.items()}
trainer.create_optimizer()
for _ in range(20):
outputs = trainer.model(**batch)
loss = outputs.loss
loss.backward()
trainer.optimizer.step()
trainer.optimizer.zero_grad()
```
<Tip>
💡 如果您的训练数据不平衡,请确保构建一批包含所有标签的训练数据。
</Tip>
生成的模型在一个“批次”上应该有接近完美的结果。 让我们计算结果预测的指标:
```py
with torch.no_grad():
outputs = trainer.model(**batch)
preds = outputs.logits
labels = batch["labels"]
compute_metrics((preds.cpu().numpy(), labels.cpu().numpy()))
```
```python out
{'accuracy': 1.0}
```
100% 准确率,现在这是一个很好的过拟合示例(这意味着如果你在任何其他句子上尝试你的模型,它很可能会给你一个错误的答案)!
如果你没有设法让你的模型获得这样的完美结果,这意味着你构建问题或数据的方式有问题,所以你应该修复它。 只有当你可以通过过拟合测试时,你才能确定你的模型实际上可以学到一些东西。
<Tip warning={true}>
⚠️ 在此测试之后,您将不得不重新创建您的模型和“Trainer”,因为获得的模型可能无法在您的完整数据集上恢复和学习有用的东西。
</Tip>
### 在你有第一个基线之前不要调整任何东西 [[在你有第一个基线之前不要调整任何东西]]
超参数调优总是被强调为机器学习中最难的部分,但这只是帮助您在指标上有所收获的最后一步。 大多数情况下,`Trainer` 的默认超参数可以很好地为您提供良好的结果,因此在您获得超出数据集基线的东西之前,不要开始进行耗时且昂贵的超参数搜索 .
一旦你有一个足够好的模型,你就可以开始稍微调整一下。 不要尝试使用不同的超参数启动一千次运行,而是比较一个超参数的不同值的几次运行,以了解哪个影响最大。
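下面是一个简单的示意(并非原文的一部分),一次只改变学习率这一个超参数,其余设置保持不变;这里假设沿用前文脚本里定义的 `model_checkpoint`、`tokenized_datasets`、`compute_metrics`、`data_collator` 和 `tokenizer`:

```py
for learning_rate in [1e-5, 3e-5, 5e-5]:
    # 每次都从同一个检查点重新初始化模型,保证比较是公平的
    model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3)
    args = TrainingArguments(
        f"distilbert-finetuned-mnli-lr-{learning_rate}",
        evaluation_strategy="epoch",
        save_strategy="epoch",
        learning_rate=learning_rate,
        num_train_epochs=3,
        weight_decay=0.01,
    )
    trainer = Trainer(
        model,
        args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation_matched"],
        compute_metrics=compute_metrics,
        data_collator=data_collator,
        tokenizer=tokenizer,
    )
    trainer.train()
```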
如果您正在调整模型本身,不要尝试任何您无法合理证明的事情。 始终确保您返回过拟合测试以验证您的更改没有产生任何意外后果。
### 请求帮忙 [[请求帮忙]]
希望您会在本节中找到一些可以帮助您解决问题的建议,但如果不是这样,请记住您可以随时在 [论坛](https://discuss.huggingface.co/) 上向社区提问。
以下是一些可能有用的额外资源:
- [“作为工程最佳实践工具的再现性”](https://docs.google.com/presentation/d/1yHLPvPhUs2KGI5ZWo0sU-PKU3GimAk3iTsI38Z-B5Gw/edit#slide=id.p),作者:Joel Grus
- [“神经网络调试清单”](https://towardsdatascience.com/checklist-for-debugging-neural-networks-d8b2a9434f21) 作者:Cecelia Shao
- [“如何对机器学习代码进行单元测试”](https://medium.com/@keeper6928/how-to-unit-test-machine-learning-code-57cf6fd81765) by Chase Roberts
- [“训练神经网络的秘诀”](http://karpathy.github.io/2019/04/25/recipe/)作者:Andrej Karpathy
当然,并不是你在训练神经网络时遇到的每一个问题都是你自己的错! 如果您在 🤗 Transformers 或 🤗 Datasets 库中遇到看起来不正确的内容,您可能遇到了错误。 你应该告诉我们这一切,在下一节中,我们将准确解释如何做到这一点。
| course/chapters/zh-CN/chapter8/4.mdx/0 | {
"file_path": "course/chapters/zh-CN/chapter8/4.mdx",
"repo_id": "course",
"token_count": 18831
} | 156 |
# 課程簡介
歡迎來到 Hugging Face 的教學!本篇介紹將會帶著你設置運行環境。如果你正開始學的話,不妨先看看[第一章](/course/chapter1)再回來,這樣就能直接開始試著執行裡面的程式碼了。
我們會用到的所有函式庫都將會以 Python 資源包的方式被取得,所以這邊我們會教你如何設置 Python 環境並安裝你所需要的函式庫。
本篇將會涵蓋兩種設置環境的方法 - 使用 Colab notebook 或是 Python 虛擬環境。選你自己覺得合適的方式就好,但是對於初學者我們強烈推薦先從使用 Colab notebook 開始。
我們不會提到 Windows 系統,如果你是 Windows 的使用者,我們建議使用 Colab notebook。如果你用的是 Linux 或是 macOS,你可以任意選擇上述的兩種方法。
大部分的教學都會需要一個Hugging Face的帳號。我們建議現在就[創一個](https://huggingface.co/join)。
## 使用Google Colab notebook
用 Colab notebook 是最簡單容易的方法;在瀏覽器開一頁 Colab notebook 就能直接開始寫程式了!
如果你對 Colab notebook 不熟悉的話,我們建議你從[這篇介紹](https://colab.research.google.com/notebooks/intro.ipynb)開始。在 Colab 上你可以使用一些加速硬體,像是 GPU 或 TPU,而且工作量不大的話也不收費。
當你開始熟悉 Colab 後,建立新的筆記本然後開始進行設置:
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/new_colab.png" alt="An empty colab notebook" width="80%"/>
</div>
接下來就是安裝我們將會用到的函式庫。我們會使用 `pip` 這個Python的資源管理工具來安裝。在Colab notebook裡,你可以用 `!` 來執行系統指令,所以你可以用以下的指令來安裝 🤗 Transformers 函式庫:
```
!pip install transformers
```
把函式庫導入到 Python runtime 可以確認你的資源包有被正確地安裝:
```
import transformers
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/install.gif" alt="A gif showing the result of the two commands above: installation and import" width="80%"/>
</div>
這會安裝一個非常輕量的 🤗 Transformers。裡面沒有安裝任何像是 PyTorch 或 TensorFlow 等的機器學習框架。因為我們會用到很多函式庫裡的不同功能,所以我們建議安裝包含了大部分使用情境所需資源的開發用版本:
```
!pip install transformers[sentencepiece]
```
這會花一點時間,不過裝完你就已經完全準備好面對剩下的教學了!
## 使用Python虛擬環境
如果你比較想用 Python 虛擬環境的話,第一步就是安裝 Python。我們建議跟著[這篇教學](https://realpython.com/installing-python/)做為起手式。
當你安裝好 Python 後,你應該就能從終端機執行 Python 指令了。在進行下一步之前你可以先執行以下指令來確認 Python 有沒有安裝好:`python --version` 這條指令會讓終端機顯示你所安裝的 Python 版本。
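舉例來說(以下輸出只是示意,實際的版本號取決於你安裝的 Python):

```
python --version
```

```out
Python 3.10.12
```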
在終端機執行像是`python --version`的 Python 指令時,你應該把你的指令想成是用你系統上主要的 Python 版本來執行。我們建議不要在這個版本上安裝任何資源包,讓每個專案在各自獨立的環境裡運行就可以了。這樣每個專案都可以有各自的相依性跟資源包,你也不用擔心不同專案之間使用同一個環境時潛在的相容性問題。
在 Python 我們可以用[*虛擬環境*](https://docs.python.org/3/tutorial/venv.html)來做這件事。虛擬環境是一個獨立包裝的樹狀目錄,每一個目錄下都有安裝特定版本的Python跟它需要的所有資源包。創建這樣的虛擬環境可以用很多不同的工具,不過我們會用一個叫做[`venv`](https://docs.python.org/3/library/venv.html#module-venv)的Python官方資源包。
首先,創建你希望你的程式執行時所在的目錄 - 舉例來說,你可能想要在你的家目錄下新增一個叫*transformers-course*的目錄:
```
mkdir ~/transformers-course
cd ~/transformers-course
```
從這個目錄裡面,你可以用Python的`venv`模組建立一個虛擬環境:
```
python -m venv .env
```
你現在應該在你的空資料夾裡找到一個叫做*.env*的目錄,這個目錄就是你的虛擬環境。
```
ls -a
```
```out
. .. .env
```
你可用 `activate` 和 `deactivate` 這兩個腳本來啟用或關閉你的虛擬環境:
```
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
source .env/bin/deactivate
```
你可以執行 `which python` 指令來確認你的虛擬環境是否有被啟用:如果它指向虛擬環境的目錄,那表示你的虛擬環境已經啟用了!
```
which python
```
```out
/home/<user>/transformers-course/.env/bin/python
```
### 安裝相依性資源包
和之前使用 Google Colab 的段落裡一樣,你現在需要安裝相依性資源包才能繼續。你可以用 `pip` 這個資源管理工具來安裝開發版的 🤗 Transformers:
```
pip install "transformers[sentencepiece]"
```
你現在已經準備就緒,可以開始了!
| course/chapters/zh-TW/chapter0/1.mdx/0 | {
"file_path": "course/chapters/zh-TW/chapter0/1.mdx",
"repo_id": "course",
"token_count": 3260
} | 157 |
<FrameworkSwitchCourse {fw} />
# 把它們放在一起
{#if fw === 'pt'}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section6_pt.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section6_pt.ipynb"},
]} />
{:else}
<CourseFloatingBanner chapter={2}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section6_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section6_tf.ipynb"},
]} />
{/if}
在最後幾節中,我們一直在盡最大努力手工完成大部分工作。我們探討了標記化器的工作原理,並研究了標記化、到輸入ID的轉換、填充、截斷和注意掩碼。
然而,正如我們在第2節中所看到的,🤗 Transformers API可以通過一個高級函數為我們處理所有這些,我們將在這裡深入討論。當你直接在句子上調用標記器時,你會得到準備通過模型傳遞的輸入
```py
from transformers import AutoTokenizer
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
sequence = "I've been waiting for a HuggingFace course my whole life."
model_inputs = tokenizer(sequence)
```
這裡,`model_inputs`
變量包含了讓模型正常運行所需的一切。對於 DistilBERT,這包括輸入 ID 和注意力掩碼(attention mask);對於需要額外輸入的其他模型,標記器也會一併輸出這些內容。
正如我們將在下面的一些示例中看到的,這種方法非常強大。首先,它可以標記單個序列:
```py
sequence = "I've been waiting for a HuggingFace course my whole life."
model_inputs = tokenizer(sequence)
```
它還一次處理多個序列,並且API沒有任何變化:
```py
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]
model_inputs = tokenizer(sequences)
```
它可以根據幾個目標進行填充:
```py
# Will pad the sequences up to the maximum sequence length
model_inputs = tokenizer(sequences, padding="longest")
# Will pad the sequences up to the model max length
# (512 for BERT or DistilBERT)
model_inputs = tokenizer(sequences, padding="max_length")
# Will pad the sequences up to the specified max length
model_inputs = tokenizer(sequences, padding="max_length", max_length=8)
```
它還可以截斷序列:
```py
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]
# Will truncate the sequences that are longer than the model max length
# (512 for BERT or DistilBERT)
model_inputs = tokenizer(sequences, truncation=True)
# Will truncate the sequences that are longer than the specified max length
model_inputs = tokenizer(sequences, max_length=8, truncation=True)
```
標記器對象可以處理到特定框架張量的轉換,然後可以直接發送到模型。例如,在下面的代碼示例中,我們提示標記器從不同的框架返回張量——`"pt"`返回Py Torch張量,`"tf"`返回TensorFlow張量,`"np"`返回NumPy數組:
```py
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]
# Returns PyTorch tensors
model_inputs = tokenizer(sequences, padding=True, return_tensors="pt")
# Returns TensorFlow tensors
model_inputs = tokenizer(sequences, padding=True, return_tensors="tf")
# Returns NumPy arrays
model_inputs = tokenizer(sequences, padding=True, return_tensors="np")
```
## 特殊詞符(token)
如果我們看一下標記器返回的輸入 ID,我們會發現它們與之前的略有不同:
```py
sequence = "I've been waiting for a HuggingFace course my whole life."
model_inputs = tokenizer(sequence)
print(model_inputs["input_ids"])
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
print(ids)
```
```python out
[101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102]
[1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012]
```
一個在開始時添加了一個標記(token) ID,一個在結束時添加了一個標記(token) ID。讓我們解碼上面的兩個ID序列,看看這是怎麼回事:
```py
print(tokenizer.decode(model_inputs["input_ids"]))
print(tokenizer.decode(ids))
```
```python out
"[CLS] i've been waiting for a huggingface course my whole life. [SEP]"
"i've been waiting for a huggingface course my whole life."
```
標記器在開頭添加了特殊單詞`[CLS]`,在結尾添加了特殊單詞`[SEP]`。這是因為模型是用這些數據預訓練的,所以為了得到相同的推理結果,我們還需要添加它們。請注意,有些模型不添加特殊單詞,或者添加不同的單詞;模型也可能只在開頭或結尾添加這些特殊單詞。在任何情況下,標記器都知道需要哪些詞符,並將為您處理這些詞符。
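下面是一個簡單的示意(並非原文的一部分),透過 `add_special_tokens` 參數比較加入與不加入特殊詞符的結果;這裡假設沿用上文的 `tokenizer` 和 `sequence`:

```py
# 預設會加入特殊詞符
print(tokenizer.decode(tokenizer(sequence)["input_ids"]))

# 把 add_special_tokens 設為 False,就不會加入 [CLS] 和 [SEP]
print(tokenizer.decode(tokenizer(sequence, add_special_tokens=False)["input_ids"]))
```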
## 結束:從標記器到模型
現在我們已經看到了標記器對象在應用於文本時使用的所有單獨步驟,讓我們最後一次看看它如何處理多個序列(填充!),非常長的序列(截斷!),以及多種類型的張量及其主要API:
{#if fw === 'pt'}
```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]
tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt")
output = model(**tokens)
```
{:else}
```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]
tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="tf")
output = model(**tokens)
```
{/if}
| course/chapters/zh-TW/chapter2/6.mdx/0 | {
"file_path": "course/chapters/zh-TW/chapter2/6.mdx",
"repo_id": "course",
"token_count": 3180
} | 158 |
# 本章簡介
<CourseFloatingBanner
chapter={5}
classNames="absolute z-10 right-0 top-0"
/>
在[第三章](/course/chapter3)第一次體驗了 🤗Datasets 庫,並發現在微調模型時有三個主要步驟:
1. 從 Hugging Face Hub 加載一個數據集。
2. 使用 Dataset.map() 對數據進行預處理。
3. 載入和計算指標(特徵)。
但這只是🤗 Datasets的表面功能而已!在本章中,我們將深入瞭解這個庫。在此過程中,我們將找到以下問題的答案:
* 當數據集不在 hub 上時,您該怎麼做?
* 如何對數據集進行切片?(如果你真正的特別需要使用pandas的時候該怎麼辦?)
* 當你的數據集很大,會撐爆你筆記本電腦的RAM時,你會怎麼做?
* 「內存映射」和 Apache Arrow 到底是什麼?
* 如何創建自己的數據集並將其推送到中心?
您在這裡學到的技術將為您在[第6章](/course/chapter6)和[第7章](/course/chapter7)中的高級標記化和微調任務做好準備——所以,喝杯咖啡,讓我們開始吧! | course/chapters/zh-TW/chapter5/1.mdx/0 | {
"file_path": "course/chapters/zh-TW/chapter5/1.mdx",
"repo_id": "course",
"token_count": 731
} | 159 |
# Unigram標記化
<CourseFloatingBanner chapter={6}
classNames="absolute z-10 right-0 top-0"
notebooks={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section7.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section7.ipynb"},
]} />
在 SentencePiece 中經常使用 Unigram 算法,該算法是 AlBERT、T5、mBART、Big Bird 和 XLNet 等模型使用的標記化算法。
<Youtube id="TGZfZVuF9Yc"/>
<Tip>
💡 本節深入介紹了 Unigram,甚至展示了一個完整的實現。如果你只想大致瞭解標記化算法,可以跳到最後。
</Tip>
## 訓練算法
與 BPE 和 WordPiece 相比,Unigram 在另一個方向上工作:它從一個較大的詞彙表開始,然後從中刪除標記,直到達到所需的詞彙表大小。有多種選項可用於構建基本詞彙表:例如,我們可以採用預標記化單詞中最常見的子串,或者在具有大詞彙量的初始語料庫上應用 BPE。
在訓練的每一步,Unigram 算法都會在給定當前詞彙的情況下計算語料庫的損失。然後,對於詞彙表中的每個符號,算法計算如果刪除該符號,整體損失會增加多少,並尋找增加最少的符號。這些符號對語料庫的整體損失影響較小,因此從某種意義上說,它們「不太需要」並且是移除的最佳候選者。
這是一個開銷非常大的操作,所以我們不會只刪除與最低損失增加相關的那一個符號,而是一次刪除損失增加最低的 \\(p\\)%(\\(p\\) 是一個可以控制的超參數,通常是 10 或 20)的符號。然後重複這個過程,直到詞彙量達到所需的大小。
請注意:我們從不刪除基本字符,以確保可以標記任何單詞。
這或許仍然有點模糊:算法的主要部分是計算語料庫的損失,並查看當我們從詞彙表中刪除一些標記時它會如何變化,但我們還沒有解釋如何做到這一點。這一步依賴於 Unigram 模型的標記化算法,因此我們接下來將深入研究。
我們將重用前面示例中的語料庫:
```
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
```
對於此示例,我們將採用初始詞彙表的所有嚴格子字符串:
```
["h", "u", "g", "hu", "ug", "p", "pu", "n", "un", "b", "bu", "s", "hug", "gs", "ugs"]
```
## 標記化算法
Unigram 模型是一種語言模型,它認為每個標記都獨立於它之前的標記。它是最簡單的語言模型,從某種意義上說, 給定先前上下文的標記 X 的概率就是標記 X 的概率。因此,如果我們使用 Unigram 語言模型生成文本,我們將始終預測最常見的標記。
給定標記的概率是它在原始語料庫中的頻率(我們找到它的次數),除以詞彙表中所有標記的所有頻率的總和(以確保概率總和為 1)。例如, `"ug"` 在 `"hug"` 、 `"pug"` 以及 `"hugs"` 中,所以它在我們的語料庫中的頻率為 20。
以下是詞彙表中所有可能的子詞的出現頻率:
```
("h", 15) ("u", 36) ("g", 20) ("hu", 15) ("ug", 20) ("p", 17) ("pu", 17) ("n", 16)
("un", 16) ("b", 4) ("bu", 4) ("s", 5) ("hug", 15) ("gs", 5) ("ugs", 5)
```
所以,所有頻率之和為210, 並且子詞 `"ug"` 出現的概率是 20/210。
<Tip>
✏️ **現在輪到你了!** 編寫代碼來計算上面的頻率,並仔細檢查顯示的結果以及總和是否正確。
</Tip>
現在,為了對給定的單詞進行標記,我們將所有可能的分割視為標記,並根據 Unigram 模型計算每個分割的概率。由於所有標記都被認為是獨立的,所以這個概率只是每個標記概率的乘積。例如 `"pug"` 的標記化 `["p", "u", "g"]` 的概率為:
$$P([``p", ``u", ``g"]) = P(``p") \times P(``u") \times P(``g") = \frac{5}{210} \times \frac{36}{210} \times \frac{20}{210} = 0.000389$$
相比之下,標記化 `["pu", "g"]` 的概率為:
$$P([``pu", ``g"]) = P(``pu") \times P(``g") = \frac{5}{210} \times \frac{20}{210} = 0.0022676$$
所以一個更有可能。一般來說,具有儘可能少的標記的標記化將具有最高的概率(因為每個標記重複除以 210),這對應於我們直觀想要的:將一個單詞分成儘可能少的標記。
使用 Unigram 模型對單詞進行分詞是概率最高的分詞。在示例 `"pug"` 中,這裡是我們為每個可能的分割獲得的概率:
```
["p", "u", "g"] : 0.000389
["p", "ug"] : 0.0022676
["pu", "g"] : 0.0022676
```
所以 `"pug"` 將被標記為 `["p", "ug"]` 或者 `["pu", "g"]`,取決於首先遇到這些分割中的哪一個(請注意:在更大的語料庫中,這樣的相等的情況很少見)。
在這種情況下,很容易找到所有可能的分割並計算它們的概率,但一般來說會有點困難。有一種用於此的經典算法,稱為 *維特比(Viterbi)算法*。本質上,我們可以構建一個圖來檢測給定單詞的可能分割,如果從_a_到_b_的子詞在詞彙表中,則從字符_a_到字符_b_之間存在一個分支,並將子詞的概率歸因於該分支。
為了在該圖中找到將具有最佳分數的路徑,維特比算法為單詞中的每個位置確定在該位置結束的具有最佳分數的分段。由於我們從開始到結束,可以通過循環遍歷以當前位置結尾的所有子詞,然後使用該子詞開始位置的最佳標記化分數來找到最佳分數。然後,我們只需要展開到達終點所採取的路徑。
讓我們看一個使用我們的詞彙表和單詞 `"unhug"` 的例子。對於每個位置,以最好的分數結尾的子詞如下:
```
Character 0 (u): "u" (score 0.171429)
Character 1 (n): "un" (score 0.076191)
Character 2 (h): "un" "h" (score 0.005442)
Character 3 (u): "un" "hu" (score 0.005442)
Character 4 (g): "un" "hug" (score 0.005442)
```
因此 `"unhug"` 將被標記為 `["un", "hug"]`。
<Tip>
✏️ **現在輪到你了!** 確定單詞 `"huggun"` 的標記化及其分數。
</Tip>
## 回到訓練
現在我們已經瞭解了標記化的工作原理,我們可以更深入地研究訓練期間使用的損失。在任何給定的階段,這個損失是通過對語料庫中的每個單詞進行標記來計算的,使用當前詞彙表和由語料庫中每個標記的頻率確定的 Unigram 模型(如前所述)。
語料庫中的每個詞都有一個分數,損失是這些分數的負對數似然 -- 即所有詞的語料庫中所有詞的總和 `-log(P(word))`。
讓我們用以下語料庫回到我們的例子:
```
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
```
每個單詞的標記化及其各自的分數是:
```
"hug": ["hug"] (score 0.071428)
"pug": ["pu", "g"] (score 0.007710)
"pun": ["pu", "n"] (score 0.006168)
"bun": ["bu", "n"] (score 0.001451)
"hugs": ["hug", "s"] (score 0.001701)
```
所以損失是:
```
10 * (-log(0.071428)) + 5 * (-log(0.007710)) + 12 * (-log(0.006168)) + 4 * (-log(0.001451)) + 5 * (-log(0.001701)) = 169.8
```
現在我們需要計算刪除每個標記如何影響損失。這相當乏味,所以我們在這裡只對兩個標記進行操作,並保存整個過程以備有代碼來幫助我們。在這個(非常)特殊的情況下,我們對所有單詞有兩個等效的標記:正如我們之前看到的,例如, `"pug"` 可以以相同的分數被標記為 `["p", "ug"]`。因此,去除詞彙表中的 `"pu"` 標記將給出完全相同的損失。
另一方面,去除 `"hug"` 損失變得更糟, 因為 `"hug"` 和 `"hugs"` 的標記化會變成:
```
"hug": ["hu", "g"] (score 0.006802)
"hugs": ["hu", "gs"] (score 0.001701)
```
這些變化將導致損失增加:
```
- 10 * (-log(0.071428)) + 10 * (-log(0.006802)) = 23.5
```
因此, 標記 `"pu"`可能會從詞彙表中刪除,但不會刪除 `"hug"`.
## 實現 Unigram
現在讓我們在代碼中實現我們迄今為止看到的所有內容。與 BPE 和 WordPiece 一樣,這不是 Unigram 算法的有效實現(恰恰相反),但它應該可以幫助你更好地理解它。
我們將使用與之前相同的語料庫作為示例:
```python
corpus = [
"This is the Hugging Face course.",
"This chapter is about tokenization.",
"This section shows several tokenizer algorithms.",
"Hopefully, you will be able to understand how they are trained and generate tokens.",
]
```
這一次,我們將使用 `xlnet-base-cased` 作為我們的模型:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
```
與 BPE 和 WordPiece 一樣,我們首先計算語料庫中每個單詞的出現次數:
```python
from collections import defaultdict
word_freqs = defaultdict(int)
for text in corpus:
words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text)
new_words = [word for word, offset in words_with_offsets]
for word in new_words:
word_freqs[word] += 1
word_freqs
```
然後,我們需要將我們的詞彙表初始化為大於我們最終想要的詞彙量。我們必須包含所有基本字符(否則我們將無法標記每個單詞),但對於較大的子字符串,我們將只保留最常見的字符,因此我們按頻率對它們進行排序:
```python
char_freqs = defaultdict(int)
subwords_freqs = defaultdict(int)
for word, freq in word_freqs.items():
for i in range(len(word)):
char_freqs[word[i]] += freq
# Loop through the subwords of length at least 2
for j in range(i + 2, len(word) + 1):
subwords_freqs[word[i:j]] += freq
# Sort subwords by frequency
sorted_subwords = sorted(subwords_freqs.items(), key=lambda x: x[1], reverse=True)
sorted_subwords[:10]
```
```python out
[('▁t', 7), ('is', 5), ('er', 5), ('▁a', 5), ('▁to', 4), ('to', 4), ('en', 4), ('▁T', 3), ('▁Th', 3), ('▁Thi', 3)]
```
我們用最優的子詞對字符進行分組,以獲得大小為 300 的初始詞彙表:
```python
token_freqs = list(char_freqs.items()) + sorted_subwords[: 300 - len(char_freqs)]
token_freqs = {token: freq for token, freq in token_freqs}
```
<Tip>
💡 SentencePiece 使用一種稱為增強後綴數組(ESA)的更高效算法來創建初始詞彙表。
</Tip>
接下來,我們計算所有頻率的總和,將頻率轉換為概率。對於我們的模型,我們將存儲概率的對數,因為添加對數比乘以小數在數值上更穩定,這將簡化模型損失的計算:
```python
from math import log
total_sum = sum([freq for token, freq in token_freqs.items()])
model = {token: -log(freq / total_sum) for token, freq in token_freqs.items()}
```
現在,核心部分是使用維特比算法對單詞進行標記的函數。正如我們之前看到的,該算法會計算單詞每個子串的最佳分段,我們將其存儲在名為 `best_segmentations` 的變量中。我們會為單詞的每個位置(從 0 到單詞總長度)存儲一個字典,其中有兩個鍵:最佳分割中最後一個標記的開始索引,以及最佳分割的分數。有了最後一個標記的開始索引,一旦列表完全填充,我們就能夠取回完整的分段。
填充列表只需兩個循環:主循環遍歷每個起始位置,第二個循環嘗試從該起始位置開始的所有子字符串。如果子串在詞彙表中,我們有一個新的詞分段,直到該結束位置,我們將其與 `best_segmentations` 相比較。
一旦主循環完成,我們就從結尾開始,從一個開始位置跳到下一個,記錄我們前進的標記,直到我們到達單詞的開頭:
```python
def encode_word(word, model):
best_segmentations = [{"start": 0, "score": 1}] + [
{"start": None, "score": None} for _ in range(len(word))
]
for start_idx in range(len(word)):
# This should be properly filled by the previous steps of the loop
best_score_at_start = best_segmentations[start_idx]["score"]
for end_idx in range(start_idx + 1, len(word) + 1):
token = word[start_idx:end_idx]
if token in model and best_score_at_start is not None:
score = model[token] + best_score_at_start
# If we have found a better segmentation ending at end_idx, we update
if (
best_segmentations[end_idx]["score"] is None
or best_segmentations[end_idx]["score"] > score
):
best_segmentations[end_idx] = {"start": start_idx, "score": score}
segmentation = best_segmentations[-1]
if segmentation["score"] is None:
# We did not find a tokenization of the word -> unknown
return ["<unk>"], None
score = segmentation["score"]
start = segmentation["start"]
end = len(word)
tokens = []
while start != 0:
tokens.insert(0, word[start:end])
next_start = best_segmentations[start]["start"]
end = start
start = next_start
tokens.insert(0, word[start:end])
return tokens, score
```
我們已經可以在一些詞上嘗試我們的初始模型:
```python
print(encode_word("Hopefully", model))
print(encode_word("This", model))
```
```python out
(['H', 'o', 'p', 'e', 'f', 'u', 'll', 'y'], 41.5157494601402)
(['This'], 6.288267030694535)
```
現在很容易計算模型在語料庫上的損失!
```python
def compute_loss(model):
loss = 0
for word, freq in word_freqs.items():
_, word_loss = encode_word(word, model)
loss += freq * word_loss
return loss
```
我們可以檢查它是否適用於我們擁有的模型:
```python
compute_loss(model)
```
```python out
413.10377642940875
```
計算每個標記的分數也不是很難;我們只需要計算通過刪除每個標記獲得的模型的損失:
```python
import copy
def compute_scores(model):
scores = {}
model_loss = compute_loss(model)
for token, score in model.items():
# We always keep tokens of length 1
if len(token) == 1:
continue
model_without_token = copy.deepcopy(model)
_ = model_without_token.pop(token)
scores[token] = compute_loss(model_without_token) - model_loss
return scores
```
我們可以在給定的標記上嘗試:
```python
scores = compute_scores(model)
print(scores["ll"])
print(scores["his"])
```
由於 `"ll"` 被用在 `"Hopefully"` 的標記化中,刪除它之後我們很可能得改用兩次標記 `"l"`,因此我們預期它會產生正的損失。而 `"his"` 只出現在單詞 `"This"` 中,且 `"This"` 被標記為它本身,所以我們預期它的損失為零。結果如下:
```python out
6.376412403623874
0.0
```
<Tip>
💡 這種方法非常低效,因此 SentencePiece 使用了沒有標記 X 的模型損失的近似值:它不是從頭開始,而是通過其在剩餘詞彙表中的分段替換標記 X。這樣,所有分數可以與模型損失同時計算。
</Tip>
完成所有這些後,我們需要做的最後一件事是將模型使用的特殊標記添加到詞彙表中,然後循環直到我們從詞彙表中修剪了足夠的標記以達到我們想要的大小:
```python
percent_to_remove = 0.1
while len(model) > 100:
scores = compute_scores(model)
sorted_scores = sorted(scores.items(), key=lambda x: x[1])
# Remove percent_to_remove tokens with the lowest scores.
for i in range(int(len(model) * percent_to_remove)):
_ = token_freqs.pop(sorted_scores[i][0])
total_sum = sum([freq for token, freq in token_freqs.items()])
model = {token: -log(freq / total_sum) for token, freq in token_freqs.items()}
```
然後,為了標記一些文本,我們只需要應用預標記化,然後使用我們的 `encode_word()` 函數:
```python
def tokenize(text, model):
words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text)
pre_tokenized_text = [word for word, offset in words_with_offsets]
encoded_words = [encode_word(word, model)[0] for word in pre_tokenized_text]
return sum(encoded_words, [])
tokenize("This is the Hugging Face course.", model)
```
```python out
['▁This', '▁is', '▁the', '▁Hugging', '▁Face', '▁', 'c', 'ou', 'r', 's', 'e', '.']
```
Unigram 就是這樣!希望現在你感覺自己是標記器所有方面的專家。在下一節中,我們將深入研究 🤗 Tokenizers 庫的構建塊,並向您展示如何使用它們來構建您自己的標記器。
| course/chapters/zh-TW/chapter6/7.mdx/0 | {
"file_path": "course/chapters/zh-TW/chapter6/7.mdx",
"repo_id": "course",
"token_count": 9937
} | 160 |
<FrameworkSwitchCourse {fw} />
# Debugging the training pipeline
<DocNotebookDropdown
classNames="absolute z-10 right-0 top-0"
options={[
{label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/chapter8/section4_tf.ipynb"},
{label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/chapter8/section4_tf.ipynb"},
]} />
你已經編寫了一個漂亮的腳本來訓練或微調給定任務的模型,盡職盡責地遵循 [第七章](/course/chapter7) 中的建議。 但是當你啟動命令 `model.fit()` 時,可怕的事情發生了:你得到一個錯誤😱! 或者更糟糕的是,一切似乎都很好,訓練運行沒有錯誤,但生成的模型很糟糕。 在本節中,我們將向您展示如何調試此類問題。
## Debugging the training pipeline
<Youtube id="N9kO52itd0Q"/>
當您在 `model.fit()` 中遇到錯誤時,問題在於它可能來自多個來源,因為訓練通常會彙集很多您在此之前一直在做的事情。 問題可能是您的數據集中有問題,或者是在嘗試將數據集的元素批處理在一起時出現問題。 或者模型代碼、損失函數或優化器中可能有問題。 即使訓練一切順利,如果您的指標有問題,評估期間仍然可能出現問題。
調試 `model.fit()` 中出現的錯誤的最佳方法是手動檢查整個管道,看看哪裡出了問題。 錯誤通常很容易解決。
為了證明這一點,我們將使用以下腳本(嘗試)在 [MNLI 數據集](https://huggingface.co/datasets/glue)上微調 DistilBERT 模型:
```py
from datasets import load_dataset, load_metric
from transformers import (
AutoTokenizer,
TFAutoModelForSequenceClassification,
)
raw_datasets = load_dataset("glue", "mnli")
model_checkpoint = "distilbert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
def preprocess_function(examples):
return tokenizer(examples["premise"], examples["hypothesis"], truncation=True)
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True)
train_dataset = tokenized_datasets["train"].to_tf_dataset(
columns=["input_ids", "labels"], batch_size=16, shuffle=True
)
validation_dataset = tokenized_datasets["validation_matched"].to_tf_dataset(
columns=["input_ids", "labels"], batch_size=16, shuffle=True
)
model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint)
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
model.fit(train_dataset)
```
如果您嘗試執行它,在進行數據集轉換時可能會收到一些“VisibleDeprecationWarning”——這是我們已知的 UX 問題,因此請忽略它。 如果你在 2021 年 11 月之後閱讀這門課程並且它仍在繼續,那麼請在推特上 @carrigmat 上發送憤怒的推文,直到他修復它。
然而,更嚴重的問題是我們得到了一個徹底的錯誤。 它真的非常長:
```python out
ValueError: No gradients provided for any variable: ['tf_distil_bert_for_sequence_classification/distilbert/embeddings/word_embeddings/weight:0', '...']
```
這意味著什麼? 我們試圖訓練我們的數據,但我們沒有梯度? 這很令人困惑。 我們甚至不知道該如何開始調試類似的東西? 當你得到的錯誤並不能立即表明問題出在哪裡時,最好的解決方案通常是按順序檢查所有內容,確保在每個階段一切看起來都是正確的。 當然,開始的地方總是...
### 檢查您的數據
這是不言而喻的,但如果您的數據已損壞,Keras 將無法為您修復它。 所以首先,你需要看看你的訓練集中有什麼。
儘管查看 `raw_datasets` 和 `tokenized_datasets` 很誘人,但我們強烈建議您在數據將要進入模型的地方直接查看數據。 這意味著應該從您使用 `to_tf_dataset()` 函數創建的 `tf.data.Dataset` 讀取輸出! 那麼我們該怎麼做呢? `tf.data.Dataset` 對象一次給我們整個批次並且不支持索引,所以我們不能只請求 `train_dataset[0]`。 但是,我們可以禮貌地向它要一批:
```py
for batch in train_dataset:
break
```
`break` 在一次迭代後結束循環,因此這會抓取來自`train_dataset` 的第一批並將其保存為`batch`。 現在,讓我們看看裡面有什麼:
```python out
{'attention_mask': <tf.Tensor: shape=(16, 76), dtype=int64, numpy=
array([[1, 1, 1, ..., 0, 0, 0],
[1, 1, 1, ..., 0, 0, 0],
[1, 1, 1, ..., 0, 0, 0],
...,
[1, 1, 1, ..., 1, 1, 1],
[1, 1, 1, ..., 0, 0, 0],
[1, 1, 1, ..., 0, 0, 0]])>,
'label': <tf.Tensor: shape=(16,), dtype=int64, numpy=array([0, 2, 1, 2, 1, 1, 2, 0, 0, 0, 1, 0, 1, 2, 2, 1])>,
'input_ids': <tf.Tensor: shape=(16, 76), dtype=int64, numpy=
array([[ 101, 2174, 1010, ..., 0, 0, 0],
[ 101, 3174, 2420, ..., 0, 0, 0],
[ 101, 2044, 2048, ..., 0, 0, 0],
...,
[ 101, 3398, 3398, ..., 2051, 2894, 102],
[ 101, 1996, 4124, ..., 0, 0, 0],
[ 101, 1999, 2070, ..., 0, 0, 0]])>}
```
這看起來不錯,不是嗎?我們將 `labels` 、`attention_mask` 和 `input_ids` 傳遞給模型,這應該是計算輸出和計算損失所需的一切。那麼為什麼我們沒有梯度呢?仔細看:我們將單個字典作為輸入傳遞,但訓練批次通常是輸入張量或字典,加上標籤張量。我們的標籤只是我們輸入字典中的一個鍵。
這是一個問題嗎?實際上,並非總是如此!但這是您在使用 TensorFlow 訓練 Transformer 模型時會遇到的最常見問題之一。我們的模型都可以在內部計算損失,但要做到這一點,需要在輸入字典中傳遞標籤。這是當我們沒有為 `compile()` 指定損失值時使用的損失。另一方面,Keras 通常希望標籤與輸入字典分開傳遞,如果你不這樣做,損失計算通常會失敗。
問題現在變得更清楚了:我們傳遞了一個“損失”參數,這意味著我們要求 Keras 為我們計算損失,但我們將標籤作為模型的輸入傳遞,而不是放在 Keras 期望的地方的!我們需要二選一:要麼使用模型的內部損失並將標籤保留在原處,要麼繼續使用 Keras 損失,但將標籤移動到 Keras 期望的位置。為簡單起見,讓我們採用第一種方法。將對 `compile()` 的調用更改為:
```py
model.compile(optimizer="adam")
```
現在我們將使用模型的內部損失,這個問題應該解決了!
<Tip>
✏️ **輪到你了!** 作為我們解決其他問題後的可選挑戰,你可以嘗試回到這一步,讓模型使用原始 Keras 計算的損失而不是內部損失。 您需要將 `"labels"` 添加到 `to_tf_dataset()` 的 `label_cols` 參數,以確保正確輸出標籤,這將為您提供梯度——但我們指定的損失還有一個問題 . 訓練仍然會遇到這個問題,學習會非常緩慢,並且會在多次訓練損失時達到穩定狀態。 你能弄清楚它是什麼嗎?
一個 ROT13 編碼的提示,如果你卡住了:Vs lbh ybbx ng gur bhgchgf bs FrdhraprPynffvsvpngvba zbqryf va Genafsbezref, gurve svefg bhgchg vf `ybtvgf`. Jung ner ybtvgf?
第二個提示:Jura lbh fcrpvsl bcgvzvmref, npgvingvbaf be ybffrf jvgu fgevatf, Xrenf frgf nyy gur nethzrag inyhrf gb gurve qrsnhygf. Jung nethzragf qbrf FcnefrPngrtbevpnyPebffragebcl unir, naq jung ner gurve qrsnhygf?
</Tip>
現在,讓我們嘗試訓練。 我們現在應該得到梯度,所以希望(這裡播放不祥的音樂)我們可以調用 `model.fit()` 一切都會正常工作!
```python out
246/24543 [..............................] - ETA: 15:52 - loss: nan
```
Oh no.
`nan` 不是一個非常令人開心的損失值。 儘管如此,我們已經檢查了我們的數據,它看起來還不錯。 如果這不是問題,我們下一步該去哪裡? 顯而易見的下一步是...
### 檢查你的模型
`model.fit()` 是 Keras 中一個非常方便的函數,但它為您做了很多事情,這使得準確找到問題發生的位置變得更加棘手。 如果您正在調試您的模型,一個真正有用的策略是隻將一個批次傳遞給模型,並詳細查看該批次的輸出。 如果模型拋出錯誤,另一個非常有用的提示是使用 `run_eagerly=True` `compile()` 模型。 這會使它變慢很多,但它會使錯誤消息更容易理解,因為它們會準確地指出問題發生在模型代碼的哪個位置。
不過,目前我們還不需要 `run_eagerly`。 讓我們通過模型運行我們之前得到的“批處理”,看看輸出是什麼樣子的:
```py
model(batch)
```
```python out
TFSequenceClassifierOutput(loss=<tf.Tensor: shape=(16,), dtype=float32, numpy=
array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan,
nan, nan, nan], dtype=float32)>, logits=<tf.Tensor: shape=(16, 2), dtype=float32, numpy=
array([[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan],
[nan, nan]], dtype=float32)>, hidden_states=None, attentions=None)
```
嗯,這很棘手。一切都是`nan`!但這很奇怪,不是嗎?我們所有的 logits 怎麼會變成“nan”? `nan` 的意思是“不是數字”。 `nan` 值經常出現在您執行禁止操作時,例如除以零。但是,在機器學習中瞭解 `nan` 非常重要的一件事是,該值傾向於*傳播*。如果將一個數字乘以 `nan`,則輸出也是 `nan`。如果你在輸出、損失或梯度的任何地方得到一個“nan”,那麼它會迅速傳播到你的整個模型中——因為當那個“nan”值通過你的網絡傳播回來時,你會得到nan 梯度,當使用這些梯度計算權重更新時,您將獲得 nan 權重,這些權重將計算更多的 nan 輸出!很快,整個網絡將只是“nan”的一大塊。一旦發生這種情況,就很難看出問題是從哪裡開始的。我們如何隔離“nan”第一次出現的地方?
答案是嘗試*重新初始化*我們的模型。一旦我們開始訓練,我們就會在某個地方得到一個“nan”,它很快就會傳播到整個模型中。所以,讓我們從檢查點加載模型而不做任何權重更新,看看我們從哪裡得到一個 `nan` 值:
```py
model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint)
model(batch)
```
當我們運行它時,我們得到:
```py out
TFSequenceClassifierOutput(loss=<tf.Tensor: shape=(16,), dtype=float32, numpy=
array([0.6844486 , nan, nan, 0.67127866, 0.7068601 ,
nan, 0.69309855, nan, 0.65531296, nan,
nan, nan, 0.675402 , nan, nan,
0.69831556], dtype=float32)>, logits=<tf.Tensor: shape=(16, 2), dtype=float32, numpy=
array([[-0.04761693, -0.06509043],
[-0.0481936 , -0.04556257],
[-0.0040929 , -0.05848458],
[-0.02417453, -0.0684005 ],
[-0.02517801, -0.05241832],
[-0.04514256, -0.0757378 ],
[-0.02656011, -0.02646275],
[ 0.00766164, -0.04350497],
[ 0.02060014, -0.05655622],
[-0.02615328, -0.0447021 ],
[-0.05119278, -0.06928903],
[-0.02859691, -0.04879177],
[-0.02210129, -0.05791225],
[-0.02363213, -0.05962167],
[-0.05352269, -0.0481673 ],
[-0.08141848, -0.07110836]], dtype=float32)>, hidden_states=None, attentions=None)
```
*現在*我們到了某個地方! 我們的 logits 中沒有 `nan` 值,這令人放心。 但是我們確實在損失中看到了一些“nan”值! 這些樣本有什麼特別導致這個問題的嗎? 讓我們看看它們是哪些(請注意,如果您自己運行此代碼,您可能會得到不同的索引,因為數據集已被隨機打亂):
```python
import numpy as np
loss = model(batch).loss.numpy()
indices = np.flatnonzero(np.isnan(loss))
indices
```
```python out
array([ 1, 2, 5, 7, 9, 10, 11, 13, 14])
```
讓我們看看這些來自樣本的輸入id:
```python
input_ids = batch["input_ids"].numpy()
input_ids[indices]
```
```python out
array([[ 101, 2007, 2032, 2001, 1037, 16480, 3917, 2594, 4135,
23212, 3070, 2214, 10170, 1010, 2012, 4356, 1997, 3183,
6838, 12953, 2039, 2000, 1996, 6147, 1997, 2010, 2606,
1012, 102, 6838, 2001, 3294, 6625, 3773, 1996, 2214,
2158, 1012, 102, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 1998, 6814, 2016, 2234, 2461, 2153, 1998, 13322,
2009, 1012, 102, 2045, 1005, 1055, 2053, 3382, 2008,
2016, 1005, 2222, 3046, 8103, 2075, 2009, 2153, 1012,
102, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 1998, 2007, 1996, 3712, 4634, 1010, 2057, 8108,
2025, 3404, 2028, 1012, 1996, 2616, 18449, 2125, 1999,
1037, 9666, 1997, 4100, 8663, 11020, 6313, 2791, 1998,
2431, 1011, 4301, 1012, 102, 2028, 1005, 1055, 5177,
2110, 1998, 3977, 2000, 2832, 2106, 2025, 2689, 2104,
2122, 6214, 1012, 102, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 1045, 2001, 1999, 1037, 13090, 5948, 2007, 2048,
2308, 2006, 2026, 5001, 2043, 2026, 2171, 2001, 2170,
1012, 102, 1045, 2001, 3564, 1999, 2277, 1012, 102,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 2195, 4279, 2191, 2039, 1996, 2181, 2124, 2004,
1996, 2225, 7363, 1012, 102, 2045, 2003, 2069, 2028,
2451, 1999, 1996, 2225, 7363, 1012, 102, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 2061, 2008, 1045, 2123, 1005, 1056, 2113, 2065,
2009, 2428, 10654, 7347, 2030, 2009, 7126, 2256, 2495,
2291, 102, 2009, 2003, 5094, 2256, 2495, 2291, 2035,
2105, 1012, 102, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 2051, 1010, 2029, 3216, 2019, 2503, 3444, 1010,
6732, 1996, 2265, 2038, 19840, 2098, 2125, 9906, 1998,
2003, 2770, 2041, 1997, 4784, 1012, 102, 2051, 6732,
1996, 2265, 2003, 9525, 1998, 4569, 1012, 102, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 1996, 10556, 2140, 11515, 2058, 1010, 2010, 2162,
2252, 5689, 2013, 2010, 7223, 1012, 102, 2043, 1996,
10556, 2140, 11515, 2058, 1010, 2010, 2252, 3062, 2000,
1996, 2598, 1012, 102, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0],
[ 101, 13543, 1999, 2049, 6143, 2933, 2443, 102, 2025,
13543, 1999, 6143, 2933, 2003, 2443, 102, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0]])
```
嗯,這裡有很多東西,但沒有什麼不尋常的。 讓我們看看標籤:
```python
labels = batch['labels'].numpy()
labels[indices]
```
```python out
array([2, 2, 2, 2, 2, 2, 2, 2, 2])
```
啊! `nan` 樣本都具有相同的標籤,即標籤 2。這是一個非常明顯的提示。 當我們的標籤為 2 時,我們會得到loss為 `nan`,這表明這是檢查模型中標籤數量的好時機:
```python
model.config.num_labels
```
```python out
2
```
現在我們看到了問題:模型認為只有兩個類,但標籤上升到 2,這意味著實際上有三個類(因為 0 也是一個類)。 這就是我們得到“nan”的方式——通過嘗試計算不存在的類的損失! 讓我們嘗試改變它並再次擬合模型:
```
model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3)
model.compile(optimizer='adam')
model.fit(train_dataset)
```
```python out
869/24543 [>.............................] - ETA: 15:29 - loss: 1.1032
```
我們在訓練! 沒有更多的'nan's,我們的損失正在減少......有點。 如果你觀察一段時間,你可能會開始有點不耐煩,因為損失值一直居高不下。 讓我們在這裡停止訓練並嘗試考慮可能導致此問題的原因。 在這一點上,我們很確定數據和模型都沒有問題,但是我們的模型並沒有很好地學習。 還剩下什麼? 是時候...
### 檢查你的超參數
如果你回頭看上面的代碼,你可能根本看不到任何超參數,除了 `batch_size`,這似乎不是罪魁禍首。不過,不要被迷惑;總是有超參數,如果你看不到它們,那只是意味著你不知道它們的設置是什麼。特別要記住關於 Keras 的一個關鍵點:如果您使用字符串設置損失函數、優化器或激活函數,_它的所有參數都將設置為它們的默認值_。這意味著即使為此使用字符串非常方便,但在這樣做時您應該非常小心,因為它很容易對您隱藏關鍵的事情。 (任何嘗試上述方式的人都應該仔細注意這一事實。)
在這種情況下,我們在哪裡設置了帶有字符串的參數?我們最初使用字符串設置損失,但我們不再這樣做了。但是,我們正在使用字符串設置優化器。難道這對我們隱瞞了什麼?讓我們看看[關於它的一些討論](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam)。
這裡有什麼需要注意的嗎?沒錯——學習率!當我們只使用字符串“adam”時,我們將獲得默認的學習率,即 0.001,即 1e-3。這對於transormer模型來說太高了!一般來說,我們建議您的模型嘗試 1e-5 和 1e-4 之間的學習率;這比我們在這裡實際使用的值小 10X 到 100X 之間。聽起來這可能是一個主要問題,所以讓我們嘗試減少它。為此,我們需要導入實際的“優化器”對象。當我們這樣做的時候,讓我們從檢查點重新初始化模型,以防高學習率的訓練損壞了它的權重:
```python
from tensorflow.keras.optimizers import Adam
model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint)
model.compile(optimizer=Adam(5e-5))
```
<Tip>
💡您還可以從🤗 Transformers 中導入 `create_optimizer()` 函數,這將為您提供具有正確權重衰減以及學習率預熱和學習率衰減的 AdamW 優化器。 此優化器通常會產生比使用默認 Adam 優化器獲得的結果稍好一些的結果。
</Tip>
現在,我們可以嘗試使用新的、改進後的學習率來擬合模型:
```python
model.fit(train_dataset)
```
```python out
319/24543 [..............................] - ETA: 16:07 - loss: 0.9718
```
現在我們的損失真的在某個地方! 訓練終於看起來奏效了。 這裡有一個教訓:當你的模型正在運行但損失沒有下降,並且你確定你的數據沒問題時,檢查學習率和權重衰減等超參數是個好主意。 將其中任何一個設置得太高很可能導致訓練在高損失值下“停滯”。
## 其他潛在問題
我們已經涵蓋了上面腳本中的問題,但您可能會遇到其他幾個常見錯誤。 讓我們看一個(非常不完整的)列表。
### 處理內存不足錯誤
內存不足的跡象是“分配張量時出現 OOM”之類的錯誤——OOM 是“內存不足”的縮寫。 在處理大型語言模型時,這是一個非常常見的危險。 如果遇到這種情況,一個好的策略是將批量大小減半並重試。 但請記住,有些型號*非常*大。 例如,全尺寸 GPT-2 的參數為 1.5B,這意味著您將需要 6 GB 的內存來存儲模型,另外需要 6 GB 的內存用於梯度下降! 無論您使用什麼批量大小,訓練完整的 GPT-2 模型通常需要超過 20 GB 的 VRAM,而只有少數 GPU 擁有。 像“distilbert-base-cased”這樣更輕量級的模型更容易運行,訓練也更快。
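舉個例子,下面是一個簡單的示意(並非原文的一部分),把 batch size 從 16 減半到 8,其餘設定沿用前文:

```py
train_dataset = tokenized_datasets["train"].to_tf_dataset(
    columns=["input_ids", "labels"], batch_size=8, shuffle=True
)
validation_dataset = tokenized_datasets["validation_matched"].to_tf_dataset(
    columns=["input_ids", "labels"], batch_size=8, shuffle=True
)
```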
<Tip>
在課程的下一部分中,我們將介紹更先進的技術,這些技術可以幫助您減少內存佔用並讓您微調最大的模型。
</Tip>
### TensorFlow 🦛餓餓
您應該注意的 TensorFlow 的一個特殊怪癖是,它會在您加載模型或進行任何訓練後立即為自己分配 *所有 * GPU 內存,然後根據需要分配該內存。這與其他框架的行為不同,例如 PyTorch,後者根據 CUDA 的需要分配內存,而不是在內部進行。 TensorFlow 方法的一個優點是,當您耗盡內存時,它通常會給出有用的錯誤,並且它可以從該狀態恢復而不會導致整個 CUDA 內核崩潰。但也有一個重要的缺點:如果你同時運行兩個 TensorFlow 進程,那麼**你將度過一段糟糕的時光**。
如果您在 Colab 上運行,則無需擔心這一點,但如果您在本地運行,這絕對是您應該小心的事情。特別要注意,關閉筆記本選項卡並不一定會關閉該筆記本!您可能需要選擇正在運行的筆記本(帶有綠色圖標的筆記本)並在目錄列表中手動關閉它們。任何使用 TensorFlow 的正在運行的筆記本仍可能佔用大量 GPU 內存,這意味著您啟動的任何新筆記本都可能會遇到一些非常奇怪的問題。
如果您開始運行之前正確的代碼卻收到有關 CUDA、BLAS 或 cuBLAS 的錯誤,這通常是罪魁禍首。您可以使用類似 `nvidia-smi` 的命令來檢查 - 當您關閉或重新啟動當前筆記本時,您的大部分內存是否空閒,或者是否仍在使用中?如果它仍在使用中,則有其他東西在佔用它!
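另一個常見的緩解方式(原文沒有提到,僅供參考)是在程式一開始、還沒載入任何模型之前,啟用 GPU 記憶體的「按需分配」,讓 TensorFlow 不會一次佔走整張 GPU 的記憶體:

```python
import tensorflow as tf

# 必須在任何 GPU 被初始化之前呼叫,否則會拋出 RuntimeError
for gpu in tf.config.list_physical_devices("GPU"):
    tf.config.experimental.set_memory_growth(gpu, True)
```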
### 檢查您的數據(再次!)
只有在理論上可以從您的數據中學到任何東西時,您的模型才會學到一些東西。 如果存在損壞數據的錯誤或標籤是隨機屬性的,那麼您很可能不會在數據集上獲得任何知識。這裡一個有用的工具是`tokenizer.decode()`。 這會將 `input_ids` 轉換回字符串,因此您可以查看數據並查看您的訓練數據是否正在教授您希望它教授的內容。 例如,像我們上面所做的那樣從 `tf.data.Dataset` 中獲取 `batch` 後,您可以像這樣解碼第一個元素:
```py
input_ids = batch["input_ids"].numpy()
tokenizer.decode(input_ids[0])
```
然後,你可以像這樣將它與第一個標籤進行比較:
```py
labels = batch["labels"].numpy()
label = labels[0]
```
一旦您可以像這樣查看您的數據,您可以問自己以下問題:
- 解碼後的數據是否可以理解?
- 你認同這些標籤嗎?
- 有沒有一個標籤比其他標籤更常見?
- 如果模型預測隨機的答案/總是相同的答案,那麼loss/評估指標應該是多少?
查看您的數據後,查看模型的一些預測並對其進行解碼。 如果模型總是預測同樣的事情,那可能是因為你的數據集偏向一個類別(針對分類問題); 過採樣稀有類等技術可能會有所幫助。
如果您在初始模型上獲得的loss/評估指標與您期望的隨機預測的loss/評估指標非常不同,請仔細檢查您的loss或評估指標的計算方式,因為那裡可能存在錯誤。 如果您使用最後添加的多個loss,請確保它們具有相同的規模。
當您確定您的數據是完美的時,您可以通過一個簡單的測試來查看模型是否能夠對其進行訓練。
### 在一批上過度擬合你的模型
過度擬合通常是我們在訓練時儘量避免的事情,因為這意味著模型沒有學習識別我們想要的一般特徵,而只是記住了訓練樣本。 但是,一遍又一遍地嘗試在一個批次上訓練您的模型是一個很好的測試,可以檢查您構建的問題是否可以通過您嘗試訓練的模型來解決。 它還將幫助您查看您的初始學習率是否太高。
一旦你定義了你的“模型”,這樣做真的很容易; 只需獲取一批訓練數據,然後將該“批次”視為您的整個數據集,並在其上fit大量epoch:
```py
for batch in train_dataset:
break
# Make sure you have run model.compile() and set your optimizer,
# and your loss/metrics if you're using them
model.fit(batch, epochs=20)
```
<Tip>
💡 如果您的訓練數據不平衡,請確保構建一批包含所有標籤的訓練數據。
</Tip>
生成的模型在“批次”上應該有接近完美的結果,損失迅速下降到 0(或您正在使用的損失的最小值)。
如果你沒有設法讓你的模型獲得這樣的完美結果,這意味著你構建問題或數據的方式有問題,所以你應該修復它。 只有當你設法通過過擬合測試時,你才能確定你的模型實際上可以學到一些東西。
<Tip warning={true}>
⚠️ 在此測試之後,您將不得不重新創建您的模型和“Trainer”,因為獲得的模型可能無法在您的完整數據集上恢復和學習有用的東西。
</Tip>
### 在你有第一個基線之前不要調整任何東西
超參數調整總是被強調為機器學習中最難的部分,但這只是幫助您在指標上獲得一點點提升的最後一步。 例如將默認的 Adam 學習率 1e-3 與 Transformer 模型一起使用,當然會使學習進行得非常緩慢或完全停止,但大多數時候“合理”的超參數,例如從 1e-5 到 5e-5 的學習率,會很好地給你帶來好的結果。因此,在您獲得超出數據集基線的東西之前,不要開始進行耗時且昂貴的超參數搜索。
一旦你有一個足夠好的模型,你就可以開始稍微調整一下。 不要嘗試使用不同的超參數啟動一千次運行,而是比較一個超參數的不同值的幾次運行,以瞭解哪個影響最大。
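下面是一個簡單的示意(並非原文的一部分),一次只改變學習率這一個超參數;這裡假設沿用前文的 `model_checkpoint`、`train_dataset` 和 `validation_dataset`:

```py
from tensorflow.keras.optimizers import Adam

for learning_rate in [1e-5, 3e-5, 5e-5]:
    # 每次都從同一個檢查點重新初始化模型,確保比較是公平的
    model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3)
    model.compile(optimizer=Adam(learning_rate))
    model.fit(train_dataset, validation_data=validation_dataset, epochs=1)
```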
如果您正在調整模型本身,不要嘗試任何您無法合理證明的事情。 始終確保您返回過擬合測試以驗證您的更改沒有產生任何意外後果。
### 請求幫忙
希望您會在本節中找到一些可以幫助您解決問題的建議,但如果不是這樣,請記住您可以隨時在 [論壇](https://discuss.huggingface.co/) 上向社區提問。
以下是一些可能有用的額外資源:
- [“作為工程最佳實踐工具的再現性”](https://docs.google.com/presentation/d/1yHLPvPhUs2KGI5ZWo0sU-PKU3GimAk3iTsI38Z-B5Gw/edit#slide=id.p),作者:Joel Grus
- [“神經網絡調試清單”](https://towardsdatascience.com/checklist-for-debugging-neural-networks-d8b2a9434f21) 作者:Cecelia Shao
- [“如何對機器學習代碼進行單元測試”](https://medium.com/@keeper6928/how-to-unit-test-machine-learning-code-57cf6fd81765) by Chase Roberts
- [“訓練神經網絡的秘訣”](http://karpathy.github.io/2019/04/25/recipe/)作者:Andrej Karpathy
當然,並不是你在訓練神經網絡時遇到的每一個問題都是你自己的錯! 如果您在 🤗 Transformers 或 🤗 Datasets 庫中遇到看起來不正確的內容,您可能遇到了錯誤。 你應該告訴我們這一切,在下一節中,我們將準確解釋如何做到這一點。
| course/chapters/zh-TW/chapter8/4_tf.mdx/0 | {
"file_path": "course/chapters/zh-TW/chapter8/4_tf.mdx",
"repo_id": "course",
"token_count": 20954
} | 161 |
1
00:00:05,850 --> 00:00:07,713
Welcome to the Hugging Face Course.
2
00:00:08,550 --> 00:00:10,320
This course has been designed to teach you
3
00:00:10,320 --> 00:00:12,750
all about the Hugging Face ecosystem,
4
00:00:12,750 --> 00:00:14,700
how to use the dataset and model hub
5
00:00:14,700 --> 00:00:16,803
as well as all our open-source libraries.
6
00:00:18,300 --> 00:00:19,950
Here is the Table of Contents.
7
00:00:19,950 --> 00:00:22,770
As you can see, it's
divided in three sections
8
00:00:22,770 --> 00:00:25,110
which become progressively more advanced.
9
00:00:25,110 --> 00:00:28,500
At this stage, the first two
sections have been released.
10
00:00:28,500 --> 00:00:30,120
So first, we'll teach you the basics
11
00:00:30,120 --> 00:00:32,250
of how to use a Transformer model,
12
00:00:32,250 --> 00:00:34,230
fine-tune it on your own data set
13
00:00:34,230 --> 00:00:36,960
and share the result with the community.
14
00:00:36,960 --> 00:00:39,420
So second, we'll dive
deeper into our libraries
15
00:00:39,420 --> 00:00:42,360
and teach you how to tackle any NLP task.
16
00:00:42,360 --> 00:00:44,430
We're actively working on the last one
17
00:00:44,430 --> 00:00:47,280
and hope to have it ready for
you for the spring of 2022.
18
00:00:48,510 --> 00:00:50,880
The first chapter requires
no technical knowledge
19
00:00:50,880 --> 00:00:52,320
and is a good introduction to learn
20
00:00:52,320 --> 00:00:54,180
what Transformers models can do
21
00:00:54,180 --> 00:00:56,883
and how it could be of use
to you or your company.
22
00:00:58,050 --> 00:01:01,110
The next chapters require
a good knowledge of Python
23
00:01:01,110 --> 00:01:02,130
and some basic knowledge of
24
00:01:02,130 --> 00:01:04,350
Machine Learning and Deep Learning.
25
00:01:04,350 --> 00:01:07,110
If you don't know what a
training and validation set are
26
00:01:07,110 --> 00:01:09,360
or what gradient decent means,
27
00:01:09,360 --> 00:01:11,340
you should look at an introductory course
28
00:01:11,340 --> 00:01:14,863
such as the ones published by
deeplearning.ai or fast.ai.
29
00:01:16,200 --> 00:01:17,910
It's also best if you have some basics
30
00:01:17,910 --> 00:01:21,150
in one Deep Learning Framework,
PyTorch or TensorFlow.
31
00:01:21,150 --> 00:01:23,520
Each part of the material
introduced in this course
32
00:01:23,520 --> 00:01:25,590
has a version in both those frameworks,
33
00:01:25,590 --> 00:01:26,730
so you will be able to pick the one
34
00:01:26,730 --> 00:01:28,230
you are most comfortable with.
35
00:01:29,550 --> 00:01:31,740
This is the team that
developed this course.
36
00:01:31,740 --> 00:01:33,120
I'll now let each of the speakers
37
00:01:33,120 --> 00:01:34,570
introduce themselves briefly.
38
00:01:37,230 --> 00:01:38,880
- Hi, my name is Matthew,
39
00:01:38,880 --> 00:01:41,610
and I'm a Machine Learning
Engineer at Hugging Face.
40
00:01:41,610 --> 00:01:43,200
I work on the open-source team
41
00:01:43,200 --> 00:01:45,180
and I'm responsible for
maintaining particularly
42
00:01:45,180 --> 00:01:47,280
the TensorFlow code there.
43
00:01:47,280 --> 00:01:50,130
Previously, I was a Machine
Learning Engineer at Parsley,
44
00:01:50,130 --> 00:01:52,620
who've recently been
acquired by Automatic,
45
00:01:52,620 --> 00:01:54,210
and I was a postdoctoral researcher
46
00:01:54,210 --> 00:01:57,000
before that at Trinity
College, Dublin in Ireland
47
00:01:57,000 --> 00:02:00,093
working on computational
genetics and retinal disease.
48
00:02:02,400 --> 00:02:03,870
- Hi, I'm Lysandre.
49
00:02:03,870 --> 00:02:05,640
I'm a Machine Learning
Engineer at Hugging Face
50
00:02:05,640 --> 00:02:08,700
and I'm specifically part
of the open-source team.
51
00:02:08,700 --> 00:02:10,890
I've been at Hugging
Face for a few years now
52
00:02:10,890 --> 00:02:12,300
and alongside my team members,
53
00:02:12,300 --> 00:02:13,890
I've been working on most of the tools
54
00:02:13,890 --> 00:02:15,790
that you'll get to see in this course.
55
00:02:18,270 --> 00:02:20,130
- Hi, I'm Sylvain.
56
00:02:20,130 --> 00:02:22,140
I'm a Research Engineer at Hugging Face
57
00:02:22,140 --> 00:02:25,830
and one of the main maintainers
of the Transformers Library.
58
00:02:25,830 --> 00:02:28,110
Previously, I worked at fast.ai
59
00:02:28,110 --> 00:02:30,420
where I helped develop the fast.ai Library
60
00:02:30,420 --> 00:02:32,220
as well as the online book.
61
00:02:32,220 --> 00:02:35,340
Before that, I was a math
and computer science teacher
62
00:02:35,340 --> 00:02:36,173
in France.
63
00:02:38,550 --> 00:02:41,340
- Hi, my name is Sasha and I'm
a Researcher at Hugging Face,
64
00:02:41,340 --> 00:02:42,420
working on the ethical,
65
00:02:42,420 --> 00:02:46,230
environmental and social impacts
of machine learning models.
66
00:02:46,230 --> 00:02:49,020
Previously, I was a
postdoctoral researcher at Mila,
67
00:02:49,020 --> 00:02:50,400
University in Montreal
68
00:02:50,400 --> 00:02:53,040
and I also worked as an
Applied AI Researcher
69
00:02:53,040 --> 00:02:55,140
for the United Nations Global Pulse.
70
00:02:55,140 --> 00:02:57,300
I've been involved in
projects such as CodeCarbon
71
00:02:57,300 --> 00:02:59,790
and the Machine Learning
Impacts Calculator
72
00:02:59,790 --> 00:03:02,390
to measure the carbon
footprint of machine learning.
73
00:03:05,160 --> 00:03:07,650
- Hi, I'm Merve and I'm
a Developer Advocate
74
00:03:07,650 --> 00:03:09,390
at Hugging Face.
75
00:03:09,390 --> 00:03:12,480
Previously, I was working as
a Machine Learning Engineer
76
00:03:12,480 --> 00:03:15,360
building NLP tools and chat bots.
77
00:03:15,360 --> 00:03:17,670
Currently, I'm working to improve the hub
78
00:03:17,670 --> 00:03:19,563
and democratize machine learning.
79
00:03:22,140 --> 00:03:23,670
- Hello everyone.
80
00:03:23,670 --> 00:03:27,210
My name is Lucile and I'm
a Machine Learning Engineer
81
00:03:27,210 --> 00:03:28,353
at Hugging Face.
82
00:03:29,580 --> 00:03:32,550
To tell you in two sentences who I am,
83
00:03:32,550 --> 00:03:35,590
I work on the development and
support of open-source tools
84
00:03:36,600 --> 00:03:39,595
and I also participate in
several research project
85
00:03:39,595 --> 00:03:41,795
in the field of Natural
Language Processing.
86
00:03:44,610 --> 00:03:45,540
- Good day there.
87
00:03:45,540 --> 00:03:47,550
I'm Lewis and I'm a
Machine Learning Engineer
88
00:03:47,550 --> 00:03:50,130
in the open-source team at Hugging Face.
89
00:03:50,130 --> 00:03:53,490
I'm passionate about developing
tools for the NLP community
90
00:03:53,490 --> 00:03:55,050
and you'll see me at
many of Hugging Face's
91
00:03:55,050 --> 00:03:56,910
outreach activities.
92
00:03:56,910 --> 00:03:58,470
Before joining Hugging Face,
93
00:03:58,470 --> 00:03:59,790
I spent several years developing
94
00:03:59,790 --> 00:04:01,860
machine learning applications for startups
95
00:04:01,860 --> 00:04:04,230
and enterprises in the domains of NLP,
96
00:04:04,230 --> 00:04:07,260
topological data analysis and time series.
97
00:04:07,260 --> 00:04:10,110
In a former life, I was
a theoretical physicist,
98
00:04:10,110 --> 00:04:11,760
where I researched particle collisions
99
00:04:11,760 --> 00:04:13,560
at the Large Hadron Collider and so.
100
00:04:15,900 --> 00:04:18,450
- Hey, I'm Leandro and I'm
a Machine Learning Engineer
101
00:04:18,450 --> 00:04:21,030
in the open-source team at Hugging Face.
102
00:04:21,030 --> 00:04:23,460
Before joining Hugging Face,
I worked as a Data Scientist
103
00:04:23,460 --> 00:04:26,733
in Switzerland and have taught
Data Science at University.
| course/subtitles/en/00_welcome-to-the-hugging-face-course.srt/0 | {
"file_path": "course/subtitles/en/00_welcome-to-the-hugging-face-course.srt",
"repo_id": "course",
"token_count": 3030
} | 162 |
1
00:00:00,479 --> 00:00:03,396
(object whooshing)
2
00:00:05,610 --> 00:00:06,873
- The tokenizer pipeline.
3
00:00:07,920 --> 00:00:10,570
In this video, we'll look
at how a tokenizer converts
4
00:00:11,433 --> 00:00:12,480
raw texts to numbers,
5
00:00:12,480 --> 00:00:14,970
that a Transformer
model can make sense of,
6
00:00:14,970 --> 00:00:16,520
like when we execute this code.
7
00:00:17,760 --> 00:00:18,690
Here is a quick overview
8
00:00:18,690 --> 00:00:21,630
of what happens inside
the tokenizer object:
9
00:00:21,630 --> 00:00:24,360
first, the text is split into tokens,
10
00:00:24,360 --> 00:00:27,453
which are words, parts of
words, or punctuation symbols.
11
00:00:28,440 --> 00:00:31,500
Then the tokenizer adds
potential special tokens
12
00:00:31,500 --> 00:00:34,680
and converts each token to
their unique respective ID
13
00:00:34,680 --> 00:00:36,843
as defined by the tokenizer's vocabulary.
14
00:00:37,710 --> 00:00:40,380
As we'll see, it doesn't
quite happen in this order,
15
00:00:40,380 --> 00:00:43,233
but doing it like this is
better for understandings.
16
00:00:44,280 --> 00:00:47,670
The first step is to split
our input text into tokens.
17
00:00:47,670 --> 00:00:49,653
We use the tokenize method for this.
18
00:00:50,550 --> 00:00:54,030
To do that, the tokenizer may
first perform some operations,
19
00:00:54,030 --> 00:00:56,880
like lowercasing all words,
then follow a set of rules
20
00:00:56,880 --> 00:00:59,283
to split the result in
small chunks of text.
21
00:01:00,480 --> 00:01:02,286
Most of the Transformer models uses
22
00:01:02,286 --> 00:01:04,890
a word tokenization algorithm, which means
23
00:01:04,890 --> 00:01:06,750
that one given word can be split
24
00:01:06,750 --> 00:01:10,050
in several tokens like tokenize here.
25
00:01:10,050 --> 00:01:12,570
Look at the "Tokenization
algorithms" video link below
26
00:01:12,570 --> 00:01:13,743
for more information.
27
00:01:14,760 --> 00:01:17,820
The # # prefix we see in front of ize is
28
00:01:17,820 --> 00:01:19,830
a convention used by Bert to indicate
29
00:01:19,830 --> 00:01:22,762
this token is not the
beginning of the word.
30
00:01:22,762 --> 00:01:26,310
Other tokenizers may use
different conventions however:
31
00:01:26,310 --> 00:01:29,984
for instance, ALBERT tokenizers
will add a long underscore
32
00:01:29,984 --> 00:01:31,620
in front of all the tokens
33
00:01:31,620 --> 00:01:34,920
that had a space before them,
which is a convention shared
34
00:01:34,920 --> 00:01:37,700
by all sentencepiece tokenizers.
35
00:01:38,580 --> 00:01:41,040
The second step of the
tokenization pipeline is
36
00:01:41,040 --> 00:01:43,470
to map those tokens to
their respective IDs
37
00:01:43,470 --> 00:01:45,770
as defined by the
vocabulary of the tokenizer.
38
00:01:46,770 --> 00:01:48,690
This is why we need to download the file
39
00:01:48,690 --> 00:01:50,580
when we instantiate a tokenizer
40
00:01:50,580 --> 00:01:52,400
with the from_pretrained method.
41
00:01:52,400 --> 00:01:54,390
We have to make sure
we use the same mapping
42
00:01:54,390 --> 00:01:56,520
as when the model was pretrained.
43
00:01:56,520 --> 00:01:59,703
To do this, we use the
convert_tokens_to_ids method.
44
00:02:01,050 --> 00:02:01,883
You may have noticed
45
00:02:01,883 --> 00:02:03,540
that we don't have the exact same results
46
00:02:03,540 --> 00:02:05,580
as in our first slide, or not
47
00:02:05,580 --> 00:02:07,920
as this looks like a list
of random numbers anyway,
48
00:02:07,920 --> 00:02:10,680
in which case, allow me
to refresh your memory.
49
00:02:10,680 --> 00:02:12,350
We had a number at
the beginning and a number
50
00:02:12,350 --> 00:02:17,130
at the end that are missing,
those are the special tokens.
51
00:02:17,130 --> 00:02:20,340
The special tokens are added
by the prepare_for_model method
52
00:02:20,340 --> 00:02:22,350
which knows the indices of this token
53
00:02:22,350 --> 00:02:25,680
in the vocabulary and just
adds the proper numbers
54
00:02:25,680 --> 00:02:27,243
in the input IDs list.
55
00:02:28,590 --> 00:02:29,541
You can look at the special tokens
56
00:02:29,541 --> 00:02:30,990
and, more generally,
57
00:02:30,990 --> 00:02:33,870
at how the tokenizer
has changed your text,
58
00:02:33,870 --> 00:02:35,280
by using the decode method
59
00:02:35,280 --> 00:02:37,503
on the outputs of the tokenizer object.
60
00:02:38,490 --> 00:02:39,423
The prefix for beginning
61
00:02:39,423 --> 00:02:44,160
of words / part of words, and the
special tokens, vary depending
62
00:02:44,160 --> 00:02:46,500
on which tokenizer you're using.
63
00:02:46,500 --> 00:02:48,810
So the BERT tokenizer uses CLS and SEP,
64
00:02:48,810 --> 00:02:52,417
but the roberta tokenizer
uses HTML-like anchors
65
00:02:52,417 --> 00:02:55,230
<s> and </s>.
66
00:02:55,230 --> 00:02:57,090
Now that you know how the tokenizer works,
67
00:02:57,090 --> 00:02:59,390
you can forget all those
intermediate methods,
68
00:03:00,283 --> 00:03:01,650
and just remember that
you only have to call it
69
00:03:01,650 --> 00:03:02,913
on your input texts.
70
00:03:03,870 --> 00:03:05,310
The output of a tokenizer doesn't
71
00:03:05,310 --> 00:03:07,853
just contain the input IDs, however.
72
00:03:07,853 --> 00:03:09,750
To learn what the attention mask is,
73
00:03:09,750 --> 00:03:12,360
check out the "Batch
input together" video.
74
00:03:12,360 --> 00:03:14,220
To learn about token type IDs,
75
00:03:14,220 --> 00:03:16,570
look at the "Process
pairs of sentences" video.
76
00:03:18,003 --> 00:03:20,920
(object whooshing)
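
As a rough illustration of the steps described in this video, here is a short sketch using the 🤗 Transformers library; the bert-base-uncased checkpoint and the example sentence are arbitrary choices, and the printed tokens are only indicative since the exact split depends on the tokenizer's vocabulary.

```python
from transformers import AutoTokenizer

# Example checkpoint; any fast tokenizer works the same way.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
text = "Let's try to tokenize!"

# Step 1: split the raw text into tokens (words, subwords, punctuation).
tokens = tokenizer.tokenize(text)
print(tokens)  # e.g. something like ['let', "'", 's', 'try', 'to', 'token', '##ize', '!']

# Step 2: map each token to its ID in the tokenizer's vocabulary.
input_ids = tokenizer.convert_tokens_to_ids(tokens)

# Step 3: add the special tokens ([CLS] and [SEP] for BERT) around the IDs.
final_inputs = tokenizer.prepare_for_model(input_ids)
print(final_inputs["input_ids"])

# decode() shows how the tokenizer changed the text, special tokens included.
print(tokenizer.decode(final_inputs["input_ids"]))

# In practice, a single call does all of the above (plus the attention mask).
print(tokenizer(text))
```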
| course/subtitles/en/16_the-tokenization-pipeline.srt/0 | {
"file_path": "course/subtitles/en/16_the-tokenization-pipeline.srt",
"repo_id": "course",
"token_count": 2263
} | 163 |
1
00:00:04,200 --> 00:00:06,210
- [Instructor] In this video,
we're going to understand how
2
00:00:06,210 --> 00:00:08,280
to manage a model repository
3
00:00:08,280 --> 00:00:10,053
on the Hugging Face Hub Model Hub.
4
00:00:10,920 --> 00:00:13,020
In order to handle a repository
5
00:00:13,020 --> 00:00:15,450
you should first have
a Hugging Face account.
6
00:00:15,450 --> 00:00:17,610
A link to create a new
account is available
7
00:00:17,610 --> 00:00:18,573
in the description.
8
00:00:20,130 --> 00:00:22,980
Once you are logged in, you
can create a new repository
9
00:00:22,980 --> 00:00:25,890
by clicking on the new model option.
10
00:00:25,890 --> 00:00:29,400
You should be facing a similar
modal to the following.
11
00:00:29,400 --> 00:00:33,240
In the owner input, you can
put either your own namespace
12
00:00:33,240 --> 00:00:35,703
or any of your organization's namespaces.
13
00:00:36,660 --> 00:00:39,330
The model name is the model identifier
14
00:00:39,330 --> 00:00:40,320
that will then be used
15
00:00:40,320 --> 00:00:43,143
to identify your model
on the chosen namespace.
16
00:00:44,130 --> 00:00:47,700
The final choice is
between public and private.
17
00:00:47,700 --> 00:00:49,950
Public models are accessible by anyone.
18
00:00:49,950 --> 00:00:51,840
This is the recommended free option,
19
00:00:51,840 --> 00:00:54,960
as this makes your model easily
accessible and shareable.
20
00:00:54,960 --> 00:00:57,630
The owners of your
namespace are the only ones
21
00:00:57,630 --> 00:00:59,523
who can update and change your model.
22
00:01:00,450 --> 00:01:03,660
A more advanced option
is the private option.
23
00:01:03,660 --> 00:01:04,560
In this case,
24
00:01:04,560 --> 00:01:06,000
only the owners of your namespace
25
00:01:06,000 --> 00:01:08,280
will have visibility over your model.
26
00:01:08,280 --> 00:01:10,260
Other users won't know it exists
27
00:01:10,260 --> 00:01:11,810
and will not be able to use it.
28
00:01:15,030 --> 00:01:17,030
Let's create a dummy model to play with.
29
00:01:18,180 --> 00:01:19,710
Once your model is created,
30
00:01:19,710 --> 00:01:22,230
comes the management of that model.
31
00:01:22,230 --> 00:01:24,360
Three tabs are available to you.
32
00:01:24,360 --> 00:01:27,960
You're facing the first one,
which is the model card page.
33
00:01:27,960 --> 00:01:29,970
This is the page you use
to showcase your model
34
00:01:29,970 --> 00:01:31,110
to the world.
35
00:01:31,110 --> 00:01:33,260
We'll see how it can
be completed in a bit.
36
00:01:34,500 --> 00:01:37,503
The second one is the
files and versions tab.
37
00:01:38,340 --> 00:01:40,920
Your model itself is a Git repository.
38
00:01:40,920 --> 00:01:43,230
If you're unaware of
what is a Git repository,
39
00:01:43,230 --> 00:01:46,320
you can think of it as a
folder containing files.
40
00:01:46,320 --> 00:01:48,120
If you have never used Git before,
41
00:01:48,120 --> 00:01:50,100
we recommend looking at an introduction
42
00:01:50,100 --> 00:01:52,600
like the one provided in
this video's description.
43
00:01:53,850 --> 00:01:56,910
The Git repository allows you
to see the changes happening
44
00:01:56,910 --> 00:02:00,900
over time in this folder,
hence the term versions.
45
00:02:00,900 --> 00:02:03,453
We'll see how to add files
and versions in a bit.
46
00:02:07,020 --> 00:02:09,570
The final tab is the settings tab,
47
00:02:09,570 --> 00:02:12,120
which allows you to manage
your model's visibility
48
00:02:12,120 --> 00:02:13,203
and availability.
49
00:02:14,790 --> 00:02:17,673
Let's first start by adding
files to the repository.
50
00:02:18,540 --> 00:02:19,560
Files can be added
51
00:02:19,560 --> 00:02:23,340
through the web interface
thanks to the add file button.
52
00:02:23,340 --> 00:02:27,060
The added files can be of
any type, python, JSON, text,
53
00:02:27,060 --> 00:02:27,893
you name it.
54
00:02:28,740 --> 00:02:31,170
Alongside your added file and its content,
55
00:02:31,170 --> 00:02:33,363
you should name your change or commit.
56
00:02:36,330 --> 00:02:38,400
Generally, adding files is simpler
57
00:02:38,400 --> 00:02:40,770
by using the Hugging
Face Hub Python library
58
00:02:40,770 --> 00:02:43,050
or by using the command-line.
59
00:02:43,050 --> 00:02:44,310
We'll showcase how to do this
60
00:02:44,310 --> 00:02:46,290
using the Hugging Face Hub Python library,
61
00:02:46,290 --> 00:02:48,060
and there is a link in the description
62
00:02:48,060 --> 00:02:49,800
to the previous version of this video,
63
00:02:49,800 --> 00:02:52,743
showcasing how to do this
using Git and the command-line.
64
00:02:53,610 --> 00:02:54,840
First, make sure you're logged
65
00:02:54,840 --> 00:02:56,460
into your Hugging Face account,
66
00:02:56,460 --> 00:02:59,523
either through the command-line
or in a Python runtime.
67
00:03:04,634 --> 00:03:06,390
The first approach we'll take a look at
68
00:03:06,390 --> 00:03:08,880
is using the upload file method.
69
00:03:08,880 --> 00:03:10,770
This offers an extremely simple API
70
00:03:10,770 --> 00:03:12,630
to upload files through the hub.
71
00:03:12,630 --> 00:03:14,190
The three required parameters
72
00:03:14,190 --> 00:03:16,083
are the current location of the file,
73
00:03:18,570 --> 00:03:21,300
the path of that file in the repository,
74
00:03:21,300 --> 00:03:24,050
and the ID of the repository
to which you're pushing.
75
00:03:25,650 --> 00:03:27,930
There are a few additional parameters.
76
00:03:27,930 --> 00:03:29,100
The token parameter,
77
00:03:29,100 --> 00:03:31,200
if you would like to
specify a different token
78
00:03:31,200 --> 00:03:33,650
than the one saved in your
cache with your login,
79
00:03:34,830 --> 00:03:36,750
the repo type parameter,
80
00:03:36,750 --> 00:03:40,503
if you would like to push
to a data set or a space.
81
00:03:42,300 --> 00:03:45,690
We'll upload a file called
readme.md to the repository
82
00:03:45,690 --> 00:03:47,190
using this method.
83
00:03:47,190 --> 00:03:49,710
We first start by saving
a file with that name,
84
00:03:49,710 --> 00:03:51,210
which contains some information
85
00:03:51,210 --> 00:03:52,920
about the repository itself.
86
00:03:52,920 --> 00:03:54,243
Here, a title.
87
00:03:55,950 --> 00:03:57,420
Now that the file is saved,
88
00:03:57,420 --> 00:04:00,513
let's use the upload file
method to upload it to the hub.
89
00:04:01,560 --> 00:04:03,540
If we switch to the web
interface for a second
90
00:04:03,540 --> 00:04:07,080
and refresh the page, we'll
see that the README is shown.
91
00:04:07,080 --> 00:04:08,883
The file upload was a success.
92
00:04:10,170 --> 00:04:13,500
Alongside this method
exists a delete file method
93
00:04:13,500 --> 00:04:16,170
so that you may manage
your repository fully.
94
00:04:16,170 --> 00:04:18,820
We'll use it to delete the
file we have just created.
95
00:04:22,860 --> 00:04:25,320
If we refresh the page once again, good,
96
00:04:25,320 --> 00:04:26,973
the file was indeed deleted.
97
00:04:29,070 --> 00:04:32,730
This approach using only these
two methods is super simple.
98
00:04:32,730 --> 00:04:35,400
It doesn't need Git or Git LFS installed,
99
00:04:35,400 --> 00:04:37,650
but it does come with a limitation.
100
00:04:37,650 --> 00:04:39,600
The maximum file size one can upload
101
00:04:39,600 --> 00:04:41,313
is limited to five gigabytes.
102
00:04:42,360 --> 00:04:43,890
To overcome this limit,
103
00:04:43,890 --> 00:04:45,540
let's take a look at the second method
104
00:04:45,540 --> 00:04:47,643
which is the repository utility.
105
00:04:48,600 --> 00:04:51,840
This class is a wrapper over
Git and Git LFS methods,
106
00:04:51,840 --> 00:04:53,850
which abstracts most of the complexity
107
00:04:53,850 --> 00:04:55,500
and offers a flexible API
108
00:04:55,500 --> 00:04:57,990
to manage your online repositories.
109
00:04:57,990 --> 00:04:59,690
Let's take a look at how it works.
110
00:05:03,870 --> 00:05:08,369
We first start by instantiating
the repository utility.
111
00:05:08,369 --> 00:05:10,380
We provide the clone from parameter,
112
00:05:10,380 --> 00:05:13,383
in order to clone the
repository we just created.
113
00:05:14,400 --> 00:05:18,750
The repository is now
cloned in the local folder.
114
00:05:18,750 --> 00:05:22,200
The repo object that we
have just initialized
115
00:05:22,200 --> 00:05:24,873
offers quite a few methods
which are useful for us.
116
00:05:25,920 --> 00:05:28,800
We're interested in
pushing a model to the hub.
117
00:05:28,800 --> 00:05:31,170
I'll start by loading
a model and tokenizer
118
00:05:31,170 --> 00:05:32,643
I trained a few hours ago.
119
00:05:34,380 --> 00:05:36,810
We'll now follow the
traditional Git approach
120
00:05:36,810 --> 00:05:38,670
by first pulling latest changes
121
00:05:38,670 --> 00:05:40,053
using the Git pull method.
122
00:05:40,980 --> 00:05:43,170
We just cloned the repository,
123
00:05:43,170 --> 00:05:45,780
so unless this is a
super active repository,
124
00:05:45,780 --> 00:05:48,660
it's unlikely that new
changes are available.
125
00:05:48,660 --> 00:05:51,000
But it's always a good idea
to pull the latest changes
126
00:05:51,000 --> 00:05:52,300
before doing anything new.
127
00:05:53,220 --> 00:05:55,200
Now that we have pulled the repository,
128
00:05:55,200 --> 00:05:58,500
I'll save the model and
tokenizer inside that folder.
129
00:05:58,500 --> 00:06:01,200
This includes the model
weights, configuration file,
130
00:06:01,200 --> 00:06:02,673
and tokenizer files.
131
00:06:04,440 --> 00:06:05,820
Now that the model is saved,
132
00:06:05,820 --> 00:06:07,890
we'll continue with the
traditional Git approach
133
00:06:07,890 --> 00:06:10,620
and push it to the remote repository.
134
00:06:10,620 --> 00:06:12,150
If we were using the command-line,
135
00:06:12,150 --> 00:06:14,250
there are a few Git LFS specific commands
136
00:06:14,250 --> 00:06:15,600
we would have to invoke.
137
00:06:15,600 --> 00:06:17,940
But here, the Hugging Face hub package
138
00:06:17,940 --> 00:06:20,070
takes care of all of that.
139
00:06:20,070 --> 00:06:24,420
We'll start by staging the
files using the Git add method.
140
00:06:24,420 --> 00:06:27,600
We'll then commit these changes
using Git commit method,
141
00:06:27,600 --> 00:06:30,690
and providing a helpful commit message.
142
00:06:30,690 --> 00:06:33,210
Finally, we'll push the
changes to the remote,
143
00:06:33,210 --> 00:06:34,953
using the Git push method.
144
00:06:45,090 --> 00:06:47,430
If we go back to the
files and versions tab,
145
00:06:47,430 --> 00:06:49,950
we can now see the newly committed files.
146
00:06:49,950 --> 00:06:52,600
We can even play with the
model in the inference API.
147
00:06:53,790 --> 00:06:55,770
Unfortunately, the front page of our model
148
00:06:55,770 --> 00:06:57,540
is still very empty.
149
00:06:57,540 --> 00:06:59,280
Let's add a README markdown file
150
00:06:59,280 --> 00:07:00,753
to complete it a little bit.
151
00:07:01,710 --> 00:07:04,200
This README is known as the model card
152
00:07:04,200 --> 00:07:06,030
and it's arguably as important
153
00:07:06,030 --> 00:07:09,330
as the model and tokenizer
files in the model repository.
154
00:07:09,330 --> 00:07:11,280
It is the central definition
155
00:07:11,280 --> 00:07:13,200
and documentation of your model,
156
00:07:13,200 --> 00:07:16,440
ensuring reusability by
fellow community members
157
00:07:16,440 --> 00:07:18,480
and reproducibility of results.
158
00:07:18,480 --> 00:07:20,760
Providing a platform
on which other members
159
00:07:20,760 --> 00:07:22,293
may build their artifacts.
160
00:07:23,220 --> 00:07:25,590
We'll only add a title and
a small description here
161
00:07:25,590 --> 00:07:27,060
for simplicity's sake,
162
00:07:27,060 --> 00:07:29,370
but we encourage you to
add information relevant
163
00:07:29,370 --> 00:07:30,990
to how the model was trained,
164
00:07:30,990 --> 00:07:33,120
its intended use and limitations,
165
00:07:33,120 --> 00:07:36,180
as well as its identified
potential biases,
166
00:07:36,180 --> 00:07:37,440
evaluation results,
167
00:07:37,440 --> 00:07:39,843
and code samples on how to use your model.
168
00:07:41,460 --> 00:07:44,130
Great work contributing
a model to the Model Hub.
169
00:07:44,130 --> 00:07:46,440
This model can now be used
in downstream libraries
170
00:07:46,440 --> 00:07:48,783
simply by specifying
your model identifier.
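
A condensed sketch of the two approaches shown in this video, based on the huggingface_hub Python library; the repository ID, file names and local folder are placeholders, and the exact helper names may differ in more recent versions of the library.

```python
from huggingface_hub import notebook_login, upload_file, delete_file, Repository

# Log in first (or run `huggingface-cli login` in a terminal).
notebook_login()

repo_id = "my-username/dummy-model"  # placeholder repository ID

# Approach 1: upload_file — no git or git-lfs needed, files up to ~5 GB.
with open("README.md", "w") as f:
    f.write("# Dummy model\n")

upload_file(
    path_or_fileobj="README.md",  # current location of the file
    path_in_repo="README.md",     # path of the file in the repository
    repo_id=repo_id,
)

# The companion method removes a file from the repository.
delete_file(path_in_repo="README.md", repo_id=repo_id)

# Approach 2: the Repository utility, a wrapper around git and git-lfs.
repo = Repository("dummy-model-local", clone_from=repo_id)
repo.git_pull()  # always pull the latest changes first

# model.save_pretrained("dummy-model-local")      # weights + configuration
# tokenizer.save_pretrained("dummy-model-local")  # tokenizer files

repo.git_add()
repo.git_commit("Add model and tokenizer files")
repo.git_push()
```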
| course/subtitles/en/32_managing-a-repo-on-the-model-hub.srt/0 | {
"file_path": "course/subtitles/en/32_managing-a-repo-on-the-model-hub.srt",
"repo_id": "course",
"token_count": 4934
} | 164 |
1
00:00:00,000 --> 00:00:03,417
(light transition music)
2
00:00:05,490 --> 00:00:08,440
- Let's have a look inside the
question answering pipeline.
3
00:00:09,780 --> 00:00:11,370
The question answering pipeline
4
00:00:11,370 --> 00:00:13,710
can extract answers to questions
5
00:00:13,710 --> 00:00:16,020
from a given context or passage of text
6
00:00:16,020 --> 00:00:18,370
like this part of the
Transformers repo README.
7
00:00:19,290 --> 00:00:21,180
It also works for very long context,
8
00:00:21,180 --> 00:00:24,720
even if the answer is at the
very end, like in this example.
9
00:00:24,720 --> 00:00:26,223
In this video, we'll see why.
10
00:00:27,840 --> 00:00:29,310
The question answering pipeline
11
00:00:29,310 --> 00:00:32,130
follows the same steps
as the other pipelines.
12
00:00:32,130 --> 00:00:35,550
The question and context are
tokenized as a sentence pair,
13
00:00:35,550 --> 00:00:38,463
fed to the model then some
post-processing is applied.
14
00:00:39,540 --> 00:00:42,840
So tokenization and model
steps should be familiar.
15
00:00:42,840 --> 00:00:45,000
We use the auto class suitable
for question answering
16
00:00:45,000 --> 00:00:47,460
instead of sequence classification,
17
00:00:47,460 --> 00:00:50,190
but one key difference
with text classification
18
00:00:50,190 --> 00:00:52,380
is that our model outputs two tensors
19
00:00:52,380 --> 00:00:55,230
named start logits and end logits.
20
00:00:55,230 --> 00:00:56,160
Why is that?
21
00:00:56,160 --> 00:00:58,170
Well, this is the way the
model finds the answer
22
00:00:58,170 --> 00:00:59,043
to the question.
23
00:01:00,090 --> 00:01:02,610
First, let's have a look
at the model inputs.
24
00:01:02,610 --> 00:01:04,800
These are the numbers associated
with the tokenization
25
00:01:04,800 --> 00:01:05,850
of the question,
26
00:01:05,850 --> 00:01:07,753
followed by the context
27
00:01:07,753 --> 00:01:10,233
with the usual CLS and SEP special tokens.
28
00:01:11,130 --> 00:01:13,203
The answer is a part of those tokens.
29
00:01:14,040 --> 00:01:15,330
So we ask the model to predict
30
00:01:15,330 --> 00:01:17,040
which token starts the answer
31
00:01:17,040 --> 00:01:19,320
and which ends the answer.
32
00:01:19,320 --> 00:01:20,910
For our two logit outputs,
33
00:01:20,910 --> 00:01:23,823
the theoretical labels are
the pink and purple vectors.
34
00:01:24,870 --> 00:01:26,700
To convert those logits
into probabilities,
35
00:01:26,700 --> 00:01:28,596
we will need to apply a SoftMax,
36
00:01:28,596 --> 00:01:31,020
like in the text classification pipeline.
37
00:01:31,020 --> 00:01:32,310
We just mask the tokens
38
00:01:32,310 --> 00:01:35,940
that are not part of the
context before doing that,
39
00:01:35,940 --> 00:01:38,310
leaving the initial CLS token unmasked
40
00:01:38,310 --> 00:01:40,773
as we use it to predict
an impossible answer.
41
00:01:41,940 --> 00:01:44,730
This is what it looks
like in terms of code.
42
00:01:44,730 --> 00:01:47,340
We use a large negative
number for the masking
43
00:01:47,340 --> 00:01:49,533
since its exponential will then be zero.
44
00:01:50,850 --> 00:01:53,160
Now the probability for
each start and end position
45
00:01:53,160 --> 00:01:55,740
corresponding to a possible answer
46
00:01:55,740 --> 00:01:57,540
will give a score that is a product
47
00:01:57,540 --> 00:01:58,680
of the start probabilities
48
00:01:58,680 --> 00:02:00,873
and end probabilities at those positions.
49
00:02:01,920 --> 00:02:04,530
Of course, a start index
greater than an end index
50
00:02:04,530 --> 00:02:06,330
corresponds to an impossible answer.
51
00:02:07,744 --> 00:02:09,510
Here is the code to find the best score
52
00:02:09,510 --> 00:02:11,280
for a possible answer.
53
00:02:11,280 --> 00:02:13,830
Once we have the start and
end position for the tokens,
54
00:02:13,830 --> 00:02:16,650
we use the offset mappings
provided by our tokenizer
55
00:02:16,650 --> 00:02:19,710
to find the span of characters
in the initial context,
56
00:02:19,710 --> 00:02:20,810
and we get our answer.
57
00:02:22,080 --> 00:02:23,700
Now, when the context is long,
58
00:02:23,700 --> 00:02:25,977
it might get truncated by the tokenizer.
59
00:02:26,834 --> 00:02:29,790
This might result in part
of the answer, or worse,
60
00:02:29,790 --> 00:02:32,190
the whole answer, being truncated.
61
00:02:32,190 --> 00:02:34,020
So we don't discard the truncated tokens
62
00:02:34,020 --> 00:02:36,420
but build new features with them.
63
00:02:36,420 --> 00:02:39,330
Each of those features
contains the question,
64
00:02:39,330 --> 00:02:42,150
then a chunk of text in the context.
65
00:02:42,150 --> 00:02:44,520
If we take disjoint chunks of texts,
66
00:02:44,520 --> 00:02:45,840
we might end up with the answer
67
00:02:45,840 --> 00:02:47,733
being split between two features.
68
00:02:48,720 --> 00:02:52,050
So instead, we take
overlapping chunks of text
69
00:02:52,050 --> 00:02:53,910
to make sure at least one of the chunks
70
00:02:53,910 --> 00:02:56,940
will fully contain the
answer to the question.
71
00:02:56,940 --> 00:02:59,220
So, the tokenizer does all of
this for us automatically
72
00:02:59,220 --> 00:03:01,920
with the return_overflowing_tokens option.
73
00:03:01,920 --> 00:03:02,753
The stride argument
74
00:03:02,753 --> 00:03:04,830
controls the number of overlapping tokens.
75
00:03:05,940 --> 00:03:07,740
Here is how our very long context
76
00:03:07,740 --> 00:03:10,323
gets truncated in two
features with some overlap.
77
00:03:11,160 --> 00:03:12,720
By applying the same post-processing
78
00:03:12,720 --> 00:03:14,850
we saw before for each feature,
79
00:03:14,850 --> 00:03:17,970
we get the answer with a
score for each of them,
80
00:03:17,970 --> 00:03:19,920
and we take the answer with the best score
81
00:03:19,920 --> 00:03:21,303
as a final solution.
82
00:03:23,089 --> 00:03:26,506
(light transition music)
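
For reference, here is a rough TensorFlow sketch of the post-processing described above; the distilbert-base-cased-distilled-squad checkpoint, the question and the context are arbitrary examples, and this simplified version skips the long-context handling with overlapping chunks.

```python
import numpy as np
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

checkpoint = "distilbert-base-cased-distilled-squad"  # example QA checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForQuestionAnswering.from_pretrained(checkpoint)

question = "Which deep learning libraries back Transformers?"
context = "Transformers is backed by the three most popular deep learning libraries: Jax, PyTorch and TensorFlow."

inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)
start_logits = outputs.start_logits[0]
end_logits = outputs.end_logits[0]

# Mask everything that is not part of the context (keeping the initial [CLS]),
# using a large negative number so the softmax sends it to ~0.
sequence_ids = inputs.sequence_ids(0)
mask = [sid != 1 for sid in sequence_ids]
mask[0] = False
mask = tf.constant(mask)
start_logits = tf.where(mask, -10000.0, start_logits)
end_logits = tf.where(mask, -10000.0, end_logits)

start_probs = tf.nn.softmax(start_logits, axis=-1).numpy()
end_probs = tf.nn.softmax(end_logits, axis=-1).numpy()

# Score of each (start, end) pair = start probability * end probability;
# np.triu discards the impossible pairs where end < start.
scores = np.triu(start_probs[:, None] * end_probs[None, :])
start_index, end_index = np.unravel_index(np.argmax(scores), scores.shape)

# Map token indices back to character positions with the offset mapping.
offsets = tokenizer(question, context, return_offsets_mapping=True)["offset_mapping"]
start_char, _ = offsets[start_index]
_, end_char = offsets[end_index]
print(context[start_char:end_char])
```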
| course/subtitles/en/48_inside-the-question-answering-pipeline-(tensorflow).srt/0 | {
"file_path": "course/subtitles/en/48_inside-the-question-answering-pipeline-(tensorflow).srt",
"repo_id": "course",
"token_count": 2354
} | 165 |
1
00:00:00,573 --> 00:00:01,636
(air whooshing)
2
00:00:01,636 --> 00:00:02,594
(logo popping)
3
00:00:02,594 --> 00:00:05,550
(metal sliding)
4
00:00:05,550 --> 00:00:07,500
- In this video, we take
a look at setting up
5
00:00:07,500 --> 00:00:09,303
a custom loss function for training.
6
00:00:10,980 --> 00:00:13,260
In the default loss function, all samples,
7
00:00:13,260 --> 00:00:15,840
such as these code snippets,
are treated the same
8
00:00:15,840 --> 00:00:18,960
irrespective of their content
but there are scenarios
9
00:00:18,960 --> 00:00:21,660
where it could make sense to
weight the samples differently.
10
00:00:21,660 --> 00:00:24,570
If, for example, one sample
contains a lot of tokens
11
00:00:24,570 --> 00:00:26,160
that are of interest to us
12
00:00:26,160 --> 00:00:29,910
or if a sample has a
favorable diversity of tokens.
13
00:00:29,910 --> 00:00:31,950
We can also implement other heuristics
14
00:00:31,950 --> 00:00:33,963
with pattern matching or other rules.
15
00:00:35,993 --> 00:00:39,150
For each sample, we get a
loss value during training
16
00:00:39,150 --> 00:00:41,850
and we can combine that
loss with a weight.
17
00:00:41,850 --> 00:00:43,860
Then we can create a weighted sum
18
00:00:43,860 --> 00:00:45,660
or average over all samples
19
00:00:45,660 --> 00:00:47,613
to get the final loss for the batch.
20
00:00:48,690 --> 00:00:51,240
Let's have a look at a specific example.
21
00:00:51,240 --> 00:00:52,830
We want to set up a language model
22
00:00:52,830 --> 00:00:56,073
that helps us autocomplete
common data science code.
23
00:00:57,030 --> 00:01:01,830
For that task, we would like
to weight samples stronger
24
00:01:01,830 --> 00:01:04,110
where tokens related to
the data science stack,
25
00:01:04,110 --> 00:01:07,353
such as pd or np, occur more frequently.
26
00:01:10,140 --> 00:01:13,080
Here you see a loss function
that does exactly that
27
00:01:13,080 --> 00:01:15,180
for causal language modeling.
28
00:01:15,180 --> 00:01:18,030
It takes the model's input
and predicted logits,
29
00:01:18,030 --> 00:01:20,343
as well as the key tokens, as input.
30
00:01:21,869 --> 00:01:25,113
First, the inputs and logits are aligned.
31
00:01:26,490 --> 00:01:29,310
Then the loss per sample is calculated,
32
00:01:29,310 --> 00:01:30,843
followed by the weights.
33
00:01:32,430 --> 00:01:35,583
Finally, the loss and the weights
are combined and returned.
34
00:01:36,540 --> 00:01:39,150
This is a pretty big function,
so let's take a closer look
35
00:01:39,150 --> 00:01:40,953
at the loss and the weight blocks.
36
00:01:43,380 --> 00:01:45,600
During the calculation
of the standard loss,
37
00:01:45,600 --> 00:01:48,930
the logits and labels are
flattened over the batch.
38
00:01:48,930 --> 00:01:52,590
With the view, we unflatten
the tensor to get the matrix
39
00:01:52,590 --> 00:01:55,320
with a row for each sample
in the batch and a column
40
00:01:55,320 --> 00:01:57,723
for each position in the
sequence of the sample.
41
00:01:58,920 --> 00:02:00,600
We don't need the loss per position,
42
00:02:00,600 --> 00:02:04,083
so we average the loss over
all positions for each sample.
43
00:02:06,150 --> 00:02:08,970
For the weights, we use
Boolean logic to get a tensor
44
00:02:08,970 --> 00:02:12,483
with 1s where a keyword
occurred and 0s where not.
45
00:02:13,440 --> 00:02:15,690
This tensor has an additional dimension
46
00:02:15,690 --> 00:02:18,540
as the loss tensor we
just saw because we get
47
00:02:18,540 --> 00:02:21,693
the information for each
keyword in a separate matrix.
48
00:02:22,770 --> 00:02:24,120
We only want to know
49
00:02:24,120 --> 00:02:26,880
how many times keywords
occurred per sample,
50
00:02:26,880 --> 00:02:30,693
so we can sum over all keywords
and all positions per sample.
51
00:02:33,450 --> 00:02:35,010
Now we're almost there.
52
00:02:35,010 --> 00:02:38,850
We only need to combine the
loss with the weight per sample.
53
00:02:38,850 --> 00:02:41,790
We do this with element
wise multiplication
54
00:02:41,790 --> 00:02:45,233
and then average over all
samples in the batch.
55
00:02:45,233 --> 00:02:46,066
In the end,
56
00:02:46,066 --> 00:02:49,110
we have exactly one loss
value for the whole batch
57
00:02:49,110 --> 00:02:51,330
and this is the whole necessary logic
58
00:02:51,330 --> 00:02:53,223
to create a custom weighted loss.
59
00:02:56,250 --> 00:02:59,010
Let's see how we can make
use of that custom loss
60
00:02:59,010 --> 00:03:00,753
with Accelerate and the Trainer.
61
00:03:01,710 --> 00:03:04,656
In Accelerate, we just pass the input_ids
62
00:03:04,656 --> 00:03:05,730
to the model to get the logits
63
00:03:05,730 --> 00:03:08,103
and then we can call the
custom loss function.
64
00:03:09,000 --> 00:03:11,310
After that, we continue with
the normal training loop
65
00:03:11,310 --> 00:03:13,083
by, for example, calling backward.
66
00:03:14,010 --> 00:03:15,570
For the Trainer, we can overwrite
67
00:03:15,570 --> 00:03:19,260
the compute loss function
of the standard trainer.
68
00:03:19,260 --> 00:03:20,970
We just need to make sure that we return
69
00:03:20,970 --> 00:03:24,450
the loss and the model
outputs in the same format.
70
00:03:24,450 --> 00:03:27,570
With that, you can integrate
your own awesome loss function
71
00:03:27,570 --> 00:03:29,763
with both the Trainer and Accelerate.
72
00:03:31,389 --> 00:03:34,056
(air whooshing)
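
A minimal PyTorch sketch of the weighted loss described in this video; keytoken_ids (the IDs of keywords such as pd or np) and the alpha scaling factor are assumptions for illustration, not a fixed API.

```python
import torch
from torch.nn import CrossEntropyLoss

def keytoken_weighted_loss(inputs, logits, keytoken_ids, alpha=1.0):
    # Align inputs and logits: the logit at position i predicts token i + 1,
    # so drop the last logit and the first input token.
    shift_labels = inputs[..., 1:].contiguous()
    shift_logits = logits[..., :-1, :].contiguous()

    # Per-token loss, reshaped to (batch_size, sequence_length - 1),
    # then averaged over the positions of each sample.
    loss_fct = CrossEntropyLoss(reduction="none")
    loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
    loss_per_sample = loss.view(shift_logits.size(0), shift_logits.size(1)).mean(dim=1)

    # Boolean logic: count how often each keyword token occurs in each sample
    # (samples without any keyword still get a base weight of 1).
    weights = torch.stack([(inputs == kt).float() for kt in keytoken_ids]).sum(dim=[0, 2])
    weights = alpha * (1.0 + weights)

    # Weighted average over all samples in the batch: one loss value per batch.
    return (loss_per_sample * weights).mean()
```

With Accelerate, the logits would come from `model(batch["input_ids"]).logits` and the returned loss would go to `accelerator.backward(loss)`; with the Trainer, the same logic would sit inside an overridden `compute_loss` method.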
| course/subtitles/en/64_using-a-custom-loss-function.srt/0 | {
"file_path": "course/subtitles/en/64_using-a-custom-loss-function.srt",
"repo_id": "course",
"token_count": 2157
} | 166 |
Was recorded adlib - need to generate transcript with Whisper :) | course/subtitles/en/raw/chapter1/04a_the-carbon-footprint.md/0 | {
"file_path": "course/subtitles/en/raw/chapter1/04a_the-carbon-footprint.md",
"repo_id": "course",
"token_count": 13
} | 167 |
1
00:00:04,569 --> 00:00:07,529
Welcome to the Hugging Face tasks series.
2
00:00:07,529 --> 00:00:11,840
In this video, we will take a look at the
Translation task.
3
00:00:11,840 --> 00:00:19,420
Translation is the task of translating text
from one language to another.
4
00:00:19,420 --> 00:00:24,420
These models take a text in the source language
and output the translation of that text in
5
00:00:24,420 --> 00:00:28,609
the target language.
6
00:00:28,609 --> 00:00:31,619
The task is evaluated on the BLEU score.
7
00:00:31,619 --> 00:00:38,430
The score ranges from 0 to 1, in which 1 means
the translation perfectly matched and 0 did
8
00:00:38,430 --> 00:00:40,110
not match at all.
9
00:00:40,110 --> 00:00:45,320
BLEU is calculated over subsequent tokens
called n-grams.
10
00:00:45,320 --> 00:00:51,629
Unigram refers to a single token while bi-gram
refers to token pairs and n-grams refer to
11
00:00:51,629 --> 00:00:56,219
n subsequent tokens.
12
00:00:56,219 --> 00:01:01,859
Machine translation datasets contain pairs
of text in a language and translation of the
13
00:01:01,859 --> 00:01:05,910
text in another language.
14
00:01:05,910 --> 00:01:11,290
These models can help you build conversational
agents across different languages.
15
00:01:11,290 --> 00:01:16,110
One option is to translate the training data
used for the chatbot and train a separate
16
00:01:16,110 --> 00:01:19,970
chatbot.
17
00:01:19,970 --> 00:01:24,950
You can put one translation model from your
user’s language to the language your chatbot
18
00:01:24,950 --> 00:01:31,360
is trained on, translate the user inputs and
do intent classification, take the output
19
00:01:31,360 --> 00:01:39,399
of the chatbot and translate it from the language
your chatbot was trained on to the user’s
20
00:01:39,399 --> 00:01:40,850
language.
21
00:01:40,850 --> 00:01:49,720
For more information about the Translation
task, check out the Hugging Face course.
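
As a small illustration, the snippet below runs an example translation checkpoint through the pipeline API and scores it with SacreBLEU (which reports BLEU on a 0-100 scale rather than 0-1); the model name and the sentences are placeholders, not a recommendation.

```python
import evaluate
from transformers import pipeline

# Example English-to-French checkpoint; any translation model can be used here.
translator = pipeline("translation_en_to_fr", model="Helsinki-NLP/opus-mt-en-fr")
prediction = translator("This course is produced by Hugging Face.")[0]["translation_text"]
print(prediction)

# BLEU (computed here with SacreBLEU) compares the prediction against one or
# more reference translations.
metric = evaluate.load("sacrebleu")
result = metric.compute(
    predictions=[prediction],
    references=[["Ce cours est produit par Hugging Face."]],
)
print(result["score"])
```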
| course/subtitles/en/tasks_05_🤗-tasks-translation.srt/0 | {
"file_path": "course/subtitles/en/tasks_05_🤗-tasks-translation.srt",
"repo_id": "course",
"token_count": 697
} | 168 |
1
00:00:06,320 --> 00:00:11,440
Jetons un coup d'œil à la tokenisation en sous-mots. Comprendre pourquoi la tokenisation en sous-mots
2
00:00:11,440 --> 00:00:16,320
est intéressante nécessite de comprendre les défauts de la tokenisation basée sur les mots et sur les caractères.
3
00:00:17,200 --> 00:00:21,760
Si vous n'avez pas vu les premières vidéos sur la tokenisation basée sur les mots et les caractères,
4
00:00:21,760 --> 00:00:24,400
nous vous recommandons de les consulter avant de regarder cette vidéo.
5
00:00:27,680 --> 00:00:33,440
La tokenisation en sous-mots se situe entre les algorithmes de tokenisation basés sur les caractères et les mots.
6
00:00:33,440 --> 00:00:40,960
L'idée est de trouver un terrain d'entente entre de très grands vocabulaires, une grande quantité de
7
00:00:40,960 --> 00:00:47,040
tokens hors vocabulaire et une perte de sens entre des mots très similaires pour les tokenizers basés sur des mots,
8
00:00:47,040 --> 00:00:52,800
et de très longues séquences, des tokens individuels moins significatifs pour les tokenizers basés sur des caractères.
9
00:00:54,720 --> 00:00:59,360
Ces algorithmes reposent sur le principe suivant : les mots fréquemment utilisés ne doivent pas
10
00:00:59,360 --> 00:01:04,800
être divisés en sous-mots plus petits, mais les mots rares doivent être décomposés en sous-mots significatifs.
11
00:01:06,320 --> 00:01:11,520
Un exemple est le mot « dog » : nous aimerions que notre tokenizer ait un seul identifiant pour le mot
12
00:01:11,520 --> 00:01:18,480
« dog », plutôt que de le diviser en caractères : « d », « o » et « g ». Cependant, lorsque nous rencontrons le mot
13
00:01:18,480 --> 00:01:23,920
« dogs », nous aimerions que notre tokenizer comprenne qu'à la racine, il s'agit toujours du mot « dog »
14
00:01:23,920 --> 00:01:31,280
avec un « s » ajouté tout en modifiant légèrement le sens tout en gardant l'idée originale. Un autre exemple
15
00:01:31,280 --> 00:01:37,520
est un mot complexe comme « tokenization » qui peut être divisé en sous-mots significatifs. La racine
16
00:01:37,520 --> 00:01:42,000
du mot est « token » et « ization » complète la racine pour lui donner un sens légèrement différent.
17
00:01:42,720 --> 00:01:48,960
Il est logique de diviser le mot en deux : « token » en tant que racine du mot (étiqueté comme le « début »
18
00:01:48,960 --> 00:01:53,840
du mot) et « ization » en tant qu'informations supplémentaires (étiquetées comme « complétion » du mot).
19
00:01:56,240 --> 00:02:00,320
À son tour, le modèle pourra désormais donner un sens à « token » dans différentes situations.
20
00:02:00,880 --> 00:02:06,400
Il comprendra que les mots « token », « tokens », « tokenizing » et « tokenization » sont liés et ont
21
00:02:06,400 --> 00:02:14,000
une signification similaire. Il comprendra également que « tokenization », « modernization » et « immunization »,
22
00:02:14,000 --> 00:02:18,960
qui ont toutes les mêmes suffixes, sont probablement utilisées dans les mêmes situations syntaxiques.
23
00:02:20,320 --> 00:02:25,920
Les tokenizers basés sur des sous-mots ont généralement un moyen d'identifier quels tokens sont des débuts de mots et
24
00:02:25,920 --> 00:02:34,320
quels tokens complètent le début de mots. Donc ici « token » est le début d'un mot et « ##ization » est la complétion d'un mot.
25
00:02:34,960 --> 00:02:40,800
Ici, le préfixe « ## » indique que « ization » fait partie d'un mot plutôt que son début.
26
00:02:41,760 --> 00:02:49,440
Le « ## » provient du tokenizer de BERT, basé sur l'algorithme WordPiece. D'autres tokenizers utilisent d'autres
27
00:02:49,440 --> 00:02:54,720
préfixes pouvant être placés pour indiquer une partie de mots comme ici, ou le début de mots à la place !
28
00:02:56,000 --> 00:03:01,040
Il existe de nombreux algorithmes différents qui peuvent être utilisés pour la tokenisation en sous-mots, et la plupart des modèles
29
00:03:01,040 --> 00:03:05,760
obtenant des résultats de pointe en anglais utilisent aujourd'hui une sorte d'algorithme de tokenisation en sous-mots.
30
00:03:05,760 --> 00:03:12,320
Ces approches aident à réduire la taille du vocabulaire en partageant des informations
31
00:03:12,320 --> 00:03:17,840
entre différents mots, en ayant la possibilité de comprendre les préfixes et les suffixes comme tels.
32
00:03:18,480 --> 00:03:27,760
Ils conservent le sens de mots très similaires, en reconnaissant les tokens similaires qui les composent. | course/subtitles/fr/15_subword-based-tokenizers.srt/0 | {
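
À titre d'illustration (et non de référence), voici un petit exemple avec la bibliothèque 🤗 Transformers ; le checkpoint « bert-base-uncased » et les mots choisis sont arbitraires, et le découpage exact dépend du vocabulaire du tokenizer.

```python
from transformers import AutoTokenizer

# Le checkpoint « bert-base-uncased » sert uniquement d'exemple.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Un mot fréquent reste généralement entier, un mot plus rare est découpé
# en sous-mots ; le découpage exact dépend du vocabulaire du tokenizer.
for mot in ["dog", "dogs", "tokenization", "modernization"]:
    print(mot, "->", tokenizer.tokenize(mot))

# Les tokens préfixés par « ## » marquent la complétion d'un mot plutôt que son début.
```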
"file_path": "course/subtitles/fr/15_subword-based-tokenizers.srt",
"repo_id": "course",
"token_count": 1776
} | 169 |
1
00:00:04,000 --> 00:00:07,760
Dans cette vidéo, nous allons passer en revue la navigation dans le Hub d'HuggingFace.
2
00:00:10,080 --> 00:00:16,160
Ceci est la page d'accueil huggingface.co. Pour accéder au Hub des modèles, cliquez sur l'onglet « Models » dans l'
3
00:00:16,160 --> 00:00:22,720
angle supérieur droit. Vous devriez être face à cette interface Web, qui peut être divisée en plusieurs parties.
4
00:00:24,240 --> 00:00:28,560
Sur la gauche, vous trouverez des catégories que vous pouvez utiliser pour personnaliser votre recherche de modèles.
5
00:00:29,760 --> 00:00:35,840
La première catégorie est celle des « Tasks ». Les modèles sur le Hub peuvent être utilisés pour une grande variété de tâches.
6
00:00:36,480 --> 00:00:41,440
Celles-ci incluent des tâches de traitement du langage naturel, telles que la réponse aux questions ou la classification de texte,
7
00:00:41,440 --> 00:00:47,600
mais elles ne se limitent pas au NLP. D'autres tâches d'autres domaines sont également disponibles,
8
00:00:47,600 --> 00:00:52,240
telles que la classification d'images pour la vision par ordinateur ou la reconnaissance vocale automatique pour la parole.
9
00:00:54,720 --> 00:01:00,400
La deuxième catégorie est celle des « Libraries ». Les modèles partagent généralement l'un des trois backbones :
10
00:01:01,040 --> 00:01:07,040
PyTorch, TensorFlow ou JAX. Cependant, d'autres backbones, tels que Rust ou ONNX, existent également.
11
00:01:09,440 --> 00:01:14,720
Enfin, cet onglet peut également être utilisé pour spécifier de quel framework provient le modèle.
12
00:01:15,920 --> 00:01:20,880
Cela inclut Transformers, mais n'y est pas limité. Le Hub des modèles est utilisé pour héberger
13
00:01:20,880 --> 00:01:25,840
de nombreux modèles de frameworks différents, et nous cherchons activement à héberger les modèles d'autres frameworks.
14
00:01:28,400 --> 00:01:33,440
La troisième catégorie est l'onglet « Datasets ». Sélectionner un jeu de données dans cet onglet
15
00:01:33,440 --> 00:01:37,360
signifie filtrer les modèles afin qu'ils aient été entraînés sur ce jeu de données spécifique.
16
00:01:39,040 --> 00:01:43,600
La quatrième catégorie est l'onglet « Languages » . Sélectionner une langue dans cet onglet
17
00:01:43,600 --> 00:01:46,800
signifie filtrer les modèles afin qu'ils gèrent la langue sélectionnée.
18
00:01:48,480 --> 00:01:53,840
Enfin, la dernière catégorie permet de choisir la licence avec laquelle le modèle est partagé.
19
00:01:56,480 --> 00:01:59,440
Sur la droite, vous trouverez les modèles disponibles sur le Hub des modèles !
20
00:02:00,320 --> 00:02:06,400
Les modèles sont classés par téléchargements. Lorsque vous cliquez sur un modèle, vous devez faire face à sa carte de modèle.
21
00:02:07,040 --> 00:02:11,520
La carte du modèle contient des informations sur le modèle : sa description, l'utilisation prévue, les
22
00:02:11,520 --> 00:02:18,240
limites et les biais. Elle peut également afficher des extraits de code expliquant comment utiliser le modèle, ainsi que
23
00:02:18,240 --> 00:02:23,840
toute information pertinente : procédure d'entraînement, traitement des données, résultats d'évaluation, droits d'auteur.
24
00:02:25,440 --> 00:02:30,160
Ces informations sont cruciales pour que le modèle soit utilisé. Plus une carte de modèle est bien conçue,
25
00:02:30,160 --> 00:02:34,000
plus il sera facile pour les autres utilisateurs d'exploiter votre modèle dans leurs applications.
26
00:02:35,600 --> 00:02:41,440
À droite de la carte du modèle se trouve l'API d'inférence. Cette API d'inférence peut être utilisée pour jouer
27
00:02:41,440 --> 00:02:46,640
directement avec le modèle. N'hésitez pas à modifier le texte et à cliquer sur calculer pour voir comment le modèle
28
00:02:46,640 --> 00:02:55,200
se comporterait avec vos entrées. En haut de l'écran se trouvent les balises du modèle. Celles-ci incluent la tâche
29
00:02:55,200 --> 00:02:58,640
ainsi que toute autre balise pertinente pour les catégories que nous venons de voir.
30
00:03:01,200 --> 00:03:05,920
L'onglet « Files and versions » affiche l'architecture du dépôt de ce modèle.
31
00:03:07,120 --> 00:03:12,080
Ici, nous pouvons voir tous les fichiers qui définissent ce modèle. Vous verrez toutes les fonctionnalités habituelles
32
00:03:12,080 --> 00:03:22,320
d'un dépôt git : les branches disponibles, l'historique des commits ainsi que la comparaison des commits.
33
00:03:25,600 --> 00:03:28,800
Trois boutons différents sont disponibles en haut de la carte du modèle.
34
00:03:29,600 --> 00:03:32,800
Le premier montre comment utiliser l'API d'inférence par programmation.
35
00:03:35,760 --> 00:03:38,640
Le deuxième montre comment entraîner ce modèle dans SageMaker,
36
00:03:42,720 --> 00:03:45,840
et le dernier montre comment charger ce modèle dans la bibliothèque appropriée.
37
00:03:45,840 --> 00:03:49,000
Pour BERT, il s'agit de transformers. | course/subtitles/fr/31_navigating-the-model-hub.srt/0 | {
"file_path": "course/subtitles/fr/31_navigating-the-model-hub.srt",
"repo_id": "course",
"token_count": 1997
} | 170 |
1
00:00:04,130 --> 00:00:08,390
Jetons un coup d'œil à l'intérieur du pipeline de de réponse aux questions.
2
00:00:08,390 --> 00:00:12,630
Le pipeline de réponse aux questions peut extraire les réponses aux questions d'un contexte
3
00:00:12,630 --> 00:00:18,150
ou d'un passage de texte donné, comme cette partie du README du dépôt Transformers.
4
00:00:18,150 --> 00:00:22,440
Cela fonctionne aussi pour des contextes très longs, même si la réponse est à la toute fin, comme dans
5
00:00:22,440 --> 00:00:23,440
cet exemple.
6
00:00:23,440 --> 00:00:24,680
Dans cette vidéo, nous allons voir pourquoi !
7
00:00:24,680 --> 00:00:31,540
Le pipeline de réponse aux questions suit les mêmes étapes que les autres pipelines : la question
8
00:00:31,540 --> 00:00:36,380
et le contexte sont tokenisés sous la forme d'une paire de phrases, transmis au modèle, puis un post-traitement
9
00:00:36,380 --> 00:00:37,649
est appliqué.
10
00:00:37,649 --> 00:00:41,790
Les étapes de tokenisation et de modèle doivent être familières.
11
00:00:41,790 --> 00:00:47,020
Nous utilisons la classe automatique adaptée à la réponse aux questions au lieu de la classification de séquence,
12
00:00:47,020 --> 00:00:52,039
mais une différence clé avec la classification de texte est que notre modèle génère deux tenseurs nommés
13
00:00:52,039 --> 00:00:54,559
logits de début et logits de fin.
14
00:00:54,559 --> 00:00:55,559
Pourquoi donc?
15
00:00:55,559 --> 00:00:59,850
Eh bien, c'est ainsi que le modèle trouve la réponse à la question.
16
00:00:59,850 --> 00:01:02,270
Examinons d'abord les entrées du modèle.
17
00:01:02,270 --> 00:01:07,160
Il s'agit de chiffres associés à la tokenisation de la question suivis du contexte (avec
18
00:01:07,160 --> 00:01:10,710
les tokens spéciaux habituels [CLS] et [SEP]).
19
00:01:10,710 --> 00:01:13,310
La réponse fait partie de ces tokens.
20
00:01:13,310 --> 00:01:17,759
Nous demandons donc au modèle de prédire quel token commence la réponse et lequel termine la réponse.
21
00:01:17,759 --> 00:01:24,380
Pour nos deux sorties logit, les étiquettes théoriques sont les vecteurs rose et violet.
22
00:01:24,380 --> 00:01:28,360
Pour convertir ces logits en probabilités, nous devrons appliquer une SoftMax, comme dans le
23
00:01:28,360 --> 00:01:30,439
pipeline de classification de texte.
24
00:01:30,439 --> 00:01:35,070
Nous masquons simplement les tokens qui ne font pas partie du contexte avant de le faire, laissant
25
00:01:35,070 --> 00:01:41,009
le token [CLS] initial démasqué car nous l'utilisons pour prédire une réponse impossible.
26
00:01:41,009 --> 00:01:43,579
Voici à quoi cela ressemble en termes de code.
27
00:01:43,579 --> 00:01:47,729
On utilise un grand nombre négatif pour le masquage, puisque son exponentielle sera alors 0.
28
00:01:47,729 --> 00:01:53,610
Maintenant la probabilité pour chaque position de début et de fin correspondant à une réponse possible,
29
00:01:53,610 --> 00:01:57,600
on donne un score qui est le produit des probabilités de début et des probabilités de fin
30
00:01:57,600 --> 00:02:00,180
à ces positions.
31
00:02:00,180 --> 00:02:05,430
Bien entendu, un indice de début supérieur à un indice de fin correspond à une réponse impossible.
32
00:02:05,430 --> 00:02:08,940
Voici le code pour trouver le meilleur score pour une réponse possible.
33
00:02:08,940 --> 00:02:13,070
Une fois que nous avons les positions de début et de fin des tokens, nous utilisons l'association de décalage fournis
34
00:02:13,070 --> 00:02:18,270
par notre tokenizer pour trouver la plage de caractères dans le contexte initial et obtenir notre réponse !
35
00:02:18,270 --> 00:02:23,820
Désormais, lorsque le contexte est long, il peut être tronqué par le tokenizer.
36
00:02:23,820 --> 00:02:29,099
Cela pourrait entraîner une partie de la réponse, ou pire, la réponse entière, étant tronquée.
37
00:02:29,099 --> 00:02:33,319
Nous ne supprimons donc pas les tokens tronqués, mais construisons de nouvelles caractéristiques avec eux.
38
00:02:33,319 --> 00:02:39,320
Chacune de ces caractéristiques contient la question, puis un morceau de texte dans le contexte.
39
00:02:39,320 --> 00:02:43,760
Si nous prenons des morceaux de textes disjoints, nous pourrions nous retrouver avec la réponse divisée entre
40
00:02:43,760 --> 00:02:45,330
deux caractéristiques.
41
00:02:45,330 --> 00:02:49,709
Donc, à la place, nous prenons des morceaux de textes qui se chevauchent, pour nous assurer qu'au moins un des morceaux
42
00:02:49,709 --> 00:02:51,650
contiendra entièrement la réponse à la question.
43
00:02:51,650 --> 00:02:56,920
Les tokenizers font tout cela pour nous automatiquement avec l'option `return_overflowing_tokens`.
44
00:02:56,920 --> 00:03:02,069
L'argument `stride` contrôle le nombre de tokens qui se chevauchent.
45
00:03:02,069 --> 00:03:05,930
Voici comment notre très long contexte est tronqué en deux caractéristiques avec un certain chevauchement.
46
00:03:05,930 --> 00:03:10,051
En appliquant le même post-traitement que nous avons vu précédemment pour chaque caractéristique, nous obtenons la réponse
47
00:03:10,051 --> 00:03:20,349
avec un score pour chacun d'eux, et nous prenons la réponse avec le meilleur score comme solution finale. | course/subtitles/fr/47_inside-the-question-answering-pipeline-(pytorch).srt/0 | {
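
À titre indicatif, voici une esquisse de la gestion des contextes longs décrite ci-dessus ; le checkpoint, la longueur maximale de 384 et le `stride` de 128 sont des valeurs d'exemple, pas une recommandation.

```python
from transformers import AutoTokenizer

# Checkpoint donné à titre d'exemple.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")

question = "Which deep learning libraries back Transformers?"
contexte_long = "a very long passage of text " * 500  # espace réservé

# Pour un contexte plus long que la longueur maximale, on crée plusieurs
# morceaux qui se chevauchent au lieu de tronquer la fin du contexte.
inputs = tokenizer(
    question,
    contexte_long,
    max_length=384,
    truncation="only_second",        # ne tronquer que le contexte, pas la question
    stride=128,                      # nombre de tokens de chevauchement entre morceaux
    return_overflowing_tokens=True,  # renvoyer tous les morceaux
    return_offsets_mapping=True,     # pour retrouver les caractères de la réponse
)

print(len(inputs["input_ids"]))  # nombre de morceaux créés
# Chaque morceau est ensuite passé au modèle et post-traité comme décrit plus haut ;
# on garde la réponse ayant le meilleur score.
```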
"file_path": "course/subtitles/fr/47_inside-the-question-answering-pipeline-(pytorch).srt",
"repo_id": "course",
"token_count": 2145
} | 171 |
1
00:00:05,520 --> 00:00:09,360
Dans cette vidéo, nous examinons le traitement des données nécessaire pour entraîner
2
00:00:09,360 --> 00:00:15,920
des modèles de langage causal. La modélisation du langage causal consiste à prédire le token suivant en fonction
3
00:00:15,920 --> 00:00:20,880
du token précédent. Un autre terme pour la modélisation du langage causal est la modélisation autorégressive.
4
00:00:21,760 --> 00:00:26,560
Dans l'exemple que vous voyez ici, le token suivant pourrait par exemple être « NLP »
5
00:00:26,560 --> 00:00:33,280
ou « apprentissage automatique ». Un exemple populaire de modèle de langage causal est la famille de modèles GPT.
6
00:00:35,680 --> 00:00:40,400
Pour entraîner des modèles tels que GPT-2, nous commençons généralement avec un grand corpus de fichiers texte.
7
00:00:41,280 --> 00:00:45,760
Ces fichiers peuvent être des pages Web extraites d'Internet, telles que le jeu de données Common Crawl,
8
00:00:45,760 --> 00:00:51,920
ou des fichiers Python de GitHub, comme vous pouvez le voir ici. Dans un premier temps, nous devons tokeniser
9
00:00:51,920 --> 00:00:57,520
ces fichiers afin de pouvoir les donner au modèle. Ici, nous montrons les textes tokenisés sous forme de barres de
10
00:00:57,520 --> 00:01:06,000
illustrant qu'ils y en a des longs et des courts. Ceci est trian commun lorsque l'on travail avec du texte.
11
00:01:06,000 --> 00:01:07,440
Cependant les transformers ont une
12
00:01:07,440 --> 00:01:12,960
longueur de contexte limitée et, selon la source de données, il est possible que les textes tokenisés
13
00:01:12,960 --> 00:01:18,640
soient beaucoup plus longs que cette longueur de contexte. Dans ce cas, nous pourrions simplement tronquer la séquence
14
00:01:18,640 --> 00:01:24,160
à la longueur du contexte, mais cela signifierait que nous perdons tout après la longueur du contexte.
15
00:01:25,360 --> 00:01:30,960
À l'aide de `return_overflowing_tokens`, nous pouvons utiliser le tokenizer pour créer des morceaux
16
00:01:30,960 --> 00:01:36,960
chacun étant de la taille de la longueur du contexte. Parfois, il peut arriver que le dernier morceau soit
17
00:01:36,960 --> 00:01:41,440
trop court s'il n'y a pas assez de tokens pour le remplir. Dans ce cas, nous aimerions le supprimer.
18
00:01:43,440 --> 00:01:48,800
Avec le mot-clé `return_length`, nous obtenons également la longueur de chaque bloc du tokenizer.
19
00:01:51,760 --> 00:01:57,280
Cette fonction affiche toutes les étapes nécessaires pour préparer le jeu de données. D'abord, nous tokenisons le
20
00:01:57,280 --> 00:02:03,520
jeu de données avec les indicateurs que je viens de mentionner. Ensuite, nous parcourons chaque morceau et si sa longueur correspond à
21
00:02:03,520 --> 00:02:08,960
la longueur du contexte, nous l'ajoutons aux entrées que nous renvoyons. Nous pouvons appliquer cette fonction à l'ensemble du jeu de
22
00:02:08,960 --> 00:02:17,520
données et nous nous assurons d'utiliser des batchs et de supprimer les colonnes existantes. Nous devons supprimer des colonnes
23
00:02:17,520 --> 00:02:23,280
car nous pouvons créer plusieurs échantillons par texte et les formes du jeu de données ne correspondraient pas dans ce cas.
24
00:02:26,960 --> 00:02:32,400
Si la longueur du contexte est similaire à celle des fichiers, cette approche ne fonctionne plus aussi bien.
25
00:02:33,520 --> 00:02:39,440
Dans cet exemple, les échantillons 1 et 2 sont plus courts que la taille du contexte et seraient ignorés avec
26
00:02:39,440 --> 00:02:46,400
l'approche précédente. Dans ce cas, il est préférable de commencer par segmenter chaque échantillon sans troncature
27
00:02:46,400 --> 00:02:52,000
puis de concaténer les échantillons segmentés avec un token de fin de chaîne, ou « EOS », entre les deux.
28
00:02:53,840 --> 00:02:57,440
Enfin, nous pouvons fragmenter cette longue séquence avec la longueur du contexte
29
00:02:57,440 --> 00:03:05,840
et nous ne perdons aucune séquence car elles sont trop courtes. Jusqu'à présent, nous n'avons parlé que
30
00:03:05,840 --> 00:03:10,720
des entrées pour la modélisation du langage causal, mais pas des étiquettes nécessaires à l'entraînement supervisée.
31
00:03:11,600 --> 00:03:16,480
Lorsque nous effectuons une modélisation du langage causal, nous n'avons pas besoin d'étiquettes supplémentaires pour les séquences d'entrée
32
00:03:16,480 --> 00:03:22,080
car les séquences d'entrée elles-mêmes sont les étiquettes. Dans cet exemple, lorsque nous transmettons
33
00:03:22,080 --> 00:03:26,560
le token « Trans » au prochain token que nous voulons que le modèle prédise est « formers ».
34
00:03:27,280 --> 00:03:33,360
À l'étape suivante, nous donnons « Trans » et « formers » au modèle et l'étiquette que nous voulons prédire est « are ».
35
00:03:35,280 --> 00:03:42,400
Ce schéma se poursuit et, comme vous pouvez le voir, la séquence d'entrée est l'étiquette qui vient d'être décalée d'une unité.
36
00:03:43,440 --> 00:03:48,000
Étant donné que le modèle n'effectue une prédiction qu'après le premier token, le premier élément
37
00:03:48,000 --> 00:03:54,480
de la séquence d'entrée, dans ce cas « Trans », n'est pas utilisé comme étiquette. De même, nous n'avons pas
38
00:03:54,480 --> 00:04:00,400
d'étiquette pour le dernier token de la séquence puisqu'il n'y a pas de token après la fin de la séquence.
39
00:04:03,920 --> 00:04:09,200
Voyons ce que nous devons faire pour créer les étiquettes pour la modélisation du langage causal dans le code. Si
40
00:04:10,160 --> 00:04:15,600
nous voulons calculer la perte sur un batch, nous pouvons simplement transmettre les `input_ids` en tant qu'étiquettes et tout le
41
00:04:15,600 --> 00:04:19,432
décalage est géré dans le modèle en interne.
42
00:04:19,432 --> 00:04:21,600
Donc vous voyez qu'il n'y a pas de magie
43
00:04:21,600 --> 00:04:27,840
impliquée dans le traitement des données pour la modélisation du langage causal et ne nécessite que quelques étapes simples !
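
Voici une esquisse (hypothétique) de la fonction de préparation décrite dans cette vidéo ; le tokenizer de GPT-2, la longueur de contexte de 128 et le nom de colonne « content » sont des choix d'exemple.

```python
from transformers import AutoTokenizer

# Le tokenizer de GPT-2 et la longueur de contexte de 128 sont des choix d'exemple.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
context_length = 128

def tokenize(element):
    outputs = tokenizer(
        element["content"],              # « content » : nom de colonne fictif
        truncation=True,
        max_length=context_length,
        return_overflowing_tokens=True,  # découper chaque texte en morceaux
        return_length=True,              # renvoyer la longueur de chaque morceau
    )
    input_batch = []
    # On ne garde que les morceaux qui remplissent exactement la longueur de contexte.
    for length, input_ids in zip(outputs["length"], outputs["input_ids"]):
        if length == context_length:
            input_batch.append(input_ids)
    return {"input_ids": input_batch}

# tokenized = raw_dataset.map(tokenize, batched=True, remove_columns=raw_dataset.column_names)
# Pour l'entraînement, labels = input_ids : le décalage est géré en interne par le modèle.
```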
| course/subtitles/fr/63_data-processing-for-causal-language-modeling.srt/0 | {
"file_path": "course/subtitles/fr/63_data-processing-for-causal-language-modeling.srt",
"repo_id": "course",
"token_count": 2441
} | 172 |
1
00:00:04,560 --> 00:00:06,640
Bienvenue dans la série d'Hugging Face sur les tâches !
2
00:00:07,200 --> 00:00:10,400
Dans cette vidéo, nous allons jeter un œil à la modélisation du langage causal.
3
00:00:13,600 --> 00:00:16,880
La modélisation du langage causal consiste à prédire le
4
00:00:16,880 --> 00:00:21,920
mot suivant dans une phrase, compte tenu de tous les mots précédents. Cette tâche est très
5
00:00:21,920 --> 00:00:29,920
similaire à la fonction de correction automatique que vous pourriez avoir sur votre téléphone.
6
00:00:29,920 --> 00:00:34,720
Ces modèles prennent une séquence à compléter et génèrent la séquence complète.
7
00:00:38,640 --> 00:00:44,160
Les métriques de classification ne peuvent pas être utilisées, car il n'y a pas de réponse correcte unique pour la complétion.
8
00:00:44,960 --> 00:00:49,280
Au lieu de cela, nous évaluons la distribution du texte complété par le modèle.
9
00:00:50,800 --> 00:00:55,440
Une métrique courante pour ce faire est la perte d'entropie croisée. La perplexité est
10
00:00:55,440 --> 00:01:01,280
aussi une métrique largement utilisée et elle est calculée comme l'exponentielle de la perte d'entropie croisée.
11
00:01:05,200 --> 00:01:11,840
Vous pouvez utiliser n'importe quel jeu de données avec du texte brut et tokeniser le texte pour préparer les données.
12
00:01:15,040 --> 00:01:18,240
Les modèles de langage causal peuvent être utilisés pour générer du code.
13
00:01:22,480 --> 00:01:33,200
Pour plus d'informations sur la tâche de modélisation du langage causal, consultez le cours d'Hugging Face.
| course/subtitles/fr/tasks_02_🤗-tasks-causal-language-modeling.srt/0 | {
"file_path": "course/subtitles/fr/tasks_02_🤗-tasks-causal-language-modeling.srt",
"repo_id": "course",
"token_count": 659
} | 173 |
1
00:00:00,189 --> 00:00:02,856
(空气呼啸)
(air whooshing)
2
00:00:05,550 --> 00:00:07,293
- 什么是转移学习?
- What is transfer learning?
3
00:00:09,480 --> 00:00:10,920
转移学习的思想
The idea of transfer learning
4
00:00:10,920 --> 00:00:12,570
是利用在另一项任务上使用大量数据训练的模型结果
is to leverage the knowledge acquired
5
00:00:12,570 --> 00:00:15,543
来获取知识。
by a model trained with lots of data on another task.
6
00:00:16,410 --> 00:00:20,130
模型 A 将专门针对任务 A 进行训练。
The model A will be trained specifically for task A.
7
00:00:20,130 --> 00:00:22,200
现在假设您想为了另一个任务
Now let's say you want to train a model B
8
00:00:22,200 --> 00:00:23,970
训练模型 B。
for a different task.
9
00:00:23,970 --> 00:00:27,330
一种选择是从头开始训练模型。
One option would be to train the model from scratch.
10
00:00:27,330 --> 00:00:30,633
但这可能需要大量的计算、时间和数据。
This could take lots of computation, time and data.
11
00:00:31,470 --> 00:00:34,260
我们可以有另一种选择,初始化模型 B
Instead, we could initialize model B
12
00:00:34,260 --> 00:00:36,570
它与模型 A 具有相同的权重,
with the same weights as model A,
13
00:00:36,570 --> 00:00:39,213
将模型 A 的知识转移到任务 B 上。
transferring the knowledge of model A on task B.
14
00:00:41,040 --> 00:00:42,690
从头开始训练时,
When training from scratch,
15
00:00:42,690 --> 00:00:45,870
所有模型的权重都是随机初始化的。
all the model's weight are initialized randomly.
16
00:00:45,870 --> 00:00:48,870
在这个例子中,我们正在基于识别任务上
In this example, we are training a BERT model
17
00:00:48,870 --> 00:00:50,220
训练一个 BERT 模型
on the task of recognizing
18
00:00:50,220 --> 00:00:52,203
来判断两个句子是否相似。
if two sentences are similar or not.
19
00:00:54,116 --> 00:00:56,730
左边的例子是从头开始训练的,
On the left, it's trained from scratch,
20
00:00:56,730 --> 00:01:00,000
右边则代表正在微调预训练模型。
and on the right it's fine-tuning a pretrained model.
21
00:01:00,000 --> 00:01:02,220
正如我们所见,使用转移学习
As we can see, using transfer learning
22
00:01:02,220 --> 00:01:05,160
和预训练模型产生了更好的结果。
and the pretrained model yields better results.
23
00:01:05,160 --> 00:01:07,140
如果我们训练更长时间也没关系。
And it doesn't matter if we train longer.
24
00:01:07,140 --> 00:01:10,620
从头开始的训练准确率上限在 70% 左右
The training from scratch is capped around 70% accuracy
25
00:01:10,620 --> 00:01:13,293
而预训练模型轻松达到了 86%。
while the pretrained model easily beats 86%.
26
00:01:14,460 --> 00:01:16,140
这是因为预训练模型
This is because pretrained models
27
00:01:16,140 --> 00:01:18,420
通常基于大量数据进行训练
are usually trained on large amounts of data
28
00:01:18,420 --> 00:01:21,000
这些数据为模型在预训练期间
that provide the model with a statistical understanding
29
00:01:21,000 --> 00:01:23,413
提供了对语言使用的统计理解。
of the language used during pretraining.
30
00:01:24,450 --> 00:01:25,950
在计算机视觉中,
In computer vision,
31
00:01:25,950 --> 00:01:28,080
转移学习已成功应用
transfer learning has been applied successfully
32
00:01:28,080 --> 00:01:30,060
将近十年。
for almost ten years.
33
00:01:30,060 --> 00:01:32,850
模型经常在 ImageNet 上进行预训练,
Models are frequently pretrained on ImageNet,
34
00:01:32,850 --> 00:01:36,153
它是一种包含 120 万张照片图像的数据集。
a dataset containing 1.2 million photo images.
35
00:01:37,170 --> 00:01:41,130
每个图像都按 1000 个标签中的一个进行分类。
Each image is classified by one of 1000 labels.
36
00:01:41,130 --> 00:01:44,010
像这样在标记数据上训练
Training like this, on labeled data
37
00:01:44,010 --> 00:01:45,663
称为监督学习。
is called supervised learning.
38
00:01:47,340 --> 00:01:49,140
在自然语言处理中,
In Natural Language Processing,
39
00:01:49,140 --> 00:01:51,870
转移学习是最近才出现的。
transfer learning is a bit more recent.
40
00:01:51,870 --> 00:01:54,480
它与 ImageNet 的一个关键区别是预训练
A key difference with ImageNet is that the pretraining
41
00:01:54,480 --> 00:01:56,460
通常是自我监督的,
is usually self-supervised,
42
00:01:56,460 --> 00:01:58,770
这意味着它不需要人工对标签
which means it doesn't require humans annotations
43
00:01:58,770 --> 00:01:59,673
进行注释。
for the labels.
44
00:02:00,780 --> 00:02:02,700
一个非常常见的预训练目标
A very common pretraining objective
45
00:02:02,700 --> 00:02:05,310
是猜测句子中的下一个单词。
is to guess the next word in a sentence.
46
00:02:05,310 --> 00:02:07,710
它只需要大量的输入文本。
Which only requires lots and lots of text.
47
00:02:07,710 --> 00:02:10,710
例如 GPT-2,就是这样预训练的
GPT-2 for instance, was pretrained this way
48
00:02:10,710 --> 00:02:12,900
它使用 4500 万个用户在 Reddit 上发布的
using the content of 45 millions links
49
00:02:12,900 --> 00:02:14,673
链接的内容。
posted by users on Reddit.
50
00:02:16,560 --> 00:02:19,590
自监督预训练目标的另一个例子
Another example of self-supervised pretraining objective
51
00:02:19,590 --> 00:02:22,470
是预测随机屏蔽词的值。
is to predict the value of randomly masked words.
52
00:02:22,470 --> 00:02:24,540
这类似于填空测试
Which is similar to fill-in-the-blank tests
53
00:02:24,540 --> 00:02:26,760
您可能在学校做过。
you may have done in school.
54
00:02:26,760 --> 00:02:29,880
BERT 是使用英文维基百科和 11,000 本未出版的书籍
BERT was pretrained this way using the English Wikipedia
55
00:02:29,880 --> 00:02:31,893
进行预训练的。
and 11,000 unpublished books.
56
00:02:33,120 --> 00:02:36,450
在实践中,转移学习是通过抛弃原模型的头部
In practice, transfer learning is applied on a given model
57
00:02:36,450 --> 00:02:39,090
即其针对预训练目标的最后几层
by throwing away its head, that is,
58
00:02:39,090 --> 00:02:42,150
并用一个新的、随机初始化的头部
its last layers focused on the pretraining objective,
59
00:02:42,150 --> 00:02:45,360
来替换它来应用的
and replacing it with a new, randomly initialized head
60
00:02:45,360 --> 00:02:46,860
这个新的头部适用于当前的任务。
suitable for the task at hand.
61
00:02:47,970 --> 00:02:51,570
例如,当我们之前微调 BERT 模型时,
For instance, when we fine-tuned a BERT model earlier,
62
00:02:51,570 --> 00:02:54,060
我们删除了分类掩码词的头部
we removed the head that classified mask words
63
00:02:54,060 --> 00:02:56,790
并将其替换为具有 2 个输出的分类器。
and replaced it with a classifier with 2 outputs.
64
00:02:56,790 --> 00:02:58,563
因为我们的任务有两个标签。
Since our task had two labels.
65
00:02:59,700 --> 00:03:02,490
为了尽可能高效
To be as efficient as possible, the pretrained model used
66
00:03:02,490 --> 00:03:03,770
所使用的预训练模型
should be as similar as possible
67
00:03:03,770 --> 00:03:06,270
应尽可能与其微调的任务相似。
to the task it's fine-tuned on.
68
00:03:06,270 --> 00:03:08,190
例如,如果当前需要
For instance, if the problem
69
00:03:08,190 --> 00:03:10,860
对德语句子进行分类,
is to classify German sentences,
70
00:03:10,860 --> 00:03:13,053
最好使用德语预训练模型。
it's best to use a German pretrained model.
71
00:03:14,370 --> 00:03:16,649
但有好事也有坏事。
But with the good comes the bad.
72
00:03:16,649 --> 00:03:19,380
预训练模型不仅转移了它的知识,
The pretrained model does not only transfer its knowledge,
73
00:03:19,380 --> 00:03:21,693
同时也转移了它可能包含的任何偏见。
but also any bias it may contain.
74
00:03:22,530 --> 00:03:24,300
ImageNet 主要包含来自美国和西欧
ImageNet mostly contains images
75
00:03:24,300 --> 00:03:26,850
的图像。
coming from the United States and Western Europe.
76
00:03:26,850 --> 00:03:28,020
所以基于它进行微调的模型
So models fine-tuned with it
77
00:03:28,020 --> 00:03:31,710
通常会在来自这些国家或地区的图像上表现更好。
usually will perform better on images from these countries.
78
00:03:31,710 --> 00:03:33,690
OpenAI 还研究了
OpenAI also studied the bias
79
00:03:33,690 --> 00:03:36,120
其使用猜测下一个单词目标
in the predictions of its GPT-3 model
80
00:03:36,120 --> 00:03:36,953
预训练的 GPT-3 模型中
which was pretrained
81
00:03:36,953 --> 00:03:38,750
预测的偏差。
using the guess the next word objective.
82
00:03:39,720 --> 00:03:41,040
将提示的性别
Changing the gender of the prompt
83
00:03:41,040 --> 00:03:44,250
从“他”更改到“她”
from he was very to she was very
84
00:03:44,250 --> 00:03:47,550
会使预测从主要是中性形容词
changed the predictions from mostly neutral adjectives
85
00:03:47,550 --> 00:03:49,233
变为几乎只有物理上的形容词。
to almost only physical ones.
86
00:03:50,400 --> 00:03:52,367
在他们的 GPT-2 的模型卡中,
In their model card of the GPT-2 model,
87
00:03:52,367 --> 00:03:54,990
OpenAI 也承认了它的偏见
OpenAI also acknowledges its bias
88
00:03:54,990 --> 00:03:56,730
并且不鼓励在与人类交互的系统中
and discourages its use
89
00:03:56,730 --> 00:03:58,803
使用它。
in systems that interact with humans.
90
00:04:01,040 --> 00:04:03,707
(空气呼啸)
(air whooshing)
| course/subtitles/zh-CN/03_what-is-transfer-learning.srt/0 | {
"file_path": "course/subtitles/zh-CN/03_what-is-transfer-learning.srt",
"repo_id": "course",
"token_count": 4937
} | 174 |
1
00:00:00,213 --> 00:00:02,963
(滑动嗖嗖声)
(slide whooshes)
2
00:00:05,340 --> 00:00:08,373
- 本节将带来 Hugging Face Datasets 库的快速概览。
- The Hugging Face Datasets library, a quick overview.
3
00:00:09,990 --> 00:00:11,670
Hugging Face Datasets 库
The Hugging Face Datasets library
4
00:00:11,670 --> 00:00:14,310
是一个提供 API 来快速下载的库
is a library that provides an API to quickly download
5
00:00:14,310 --> 00:00:17,610
许多公共数据集并对其进行预处理。
many public datasets and preprocess them.
6
00:00:17,610 --> 00:00:20,614
在本视频中,我们将探索如何做到这一点。
In this video we will explore how to do that.
7
00:00:20,614 --> 00:00:21,780
下载部分很简单,
The downloading part is easy,
8
00:00:21,780 --> 00:00:23,760
使用 load_dataset 函数。
with the load_dataset function.
9
00:00:23,760 --> 00:00:26,460
你可以直接下载并缓存数据集
You can directly download and cache a dataset
10
00:00:26,460 --> 00:00:28,473
来自其在数据集中心的标识符。
from its identifier on the Dataset hub.
11
00:00:29,640 --> 00:00:33,570
在这里,我们从 GLUE 基准中获取 MRPC 数据集,
Here, we fetch the MRPC dataset from the GLUE benchmark,
12
00:00:33,570 --> 00:00:36,390
这是一个包含成对句子的数据集
which is a dataset containing pairs of sentences
13
00:00:36,390 --> 00:00:38,740
任务是确定释义。
where the task is to determine the paraphrases.
14
00:00:39,810 --> 00:00:42,420
load_dataset 函数返回的对象
The object returned by the load_dataset function
15
00:00:42,420 --> 00:00:45,600
是一个 DatasetDict,它是一种字典
is a DatasetDict, which is a sort of dictionary
16
00:00:45,600 --> 00:00:47,463
包含我们数据集的每个分割。
containing each split of our dataset.
17
00:00:48,946 --> 00:00:52,170
我们可以通过使用其名称进行索引来访问每个拆分。
We can access each split by indexing with its name.
18
00:00:52,170 --> 00:00:55,047
这个拆分然后是 Dataset 类的一个实例,
This split is then an instance of the Dataset class,
19
00:00:55,047 --> 00:00:58,590
有列,这里是 sentence1,sentence2,
with columns, here sentence1, sentence2,
20
00:00:58,590 --> 00:01:01,233
标签和 idx,以及行。
label and idx, and rows.
21
00:01:02,400 --> 00:01:04,563
我们可以通过索引访问给定的元素。
We can access a given element by its index.
22
00:01:05,460 --> 00:01:08,220
Hugging Face Datasets 库的神奇之处
The amazing thing about the Hugging Face Datasets library
23
00:01:08,220 --> 00:01:11,880
是所有内容都使用 Apache Arrow 保存到磁盘,
is that everything is saved to disk using Apache Arrow,
24
00:01:11,880 --> 00:01:14,550
这意味着即使你的数据集很大,
which means that even if your dataset is huge,
25
00:01:14,550 --> 00:01:16,350
你不会离开 RAM。
you won't get out of RAM.
26
00:01:16,350 --> 00:01:19,113
只有你请求的元素才会加载到内存中。
Only the elements you request are loaded in memory.
27
00:01:20,340 --> 00:01:23,940
访问数据集的一部分就像访问一个元素一样简单。
Accessing a slice of your dataset is as easy as one element.
28
00:01:23,940 --> 00:01:26,220
结果是一个包含值列表的字典
The result is then a dictionary with list of values
29
00:01:26,220 --> 00:01:27,480
对于每个键。
for each keys.
30
00:01:27,480 --> 00:01:29,070
这里是标签列表,
Here the list of labels,
31
00:01:29,070 --> 00:01:30,147
第一句话列表
the list of first sentences
32
00:01:30,147 --> 00:01:31,923
和第二句话的列表。
and the list of second sentences.
33
00:01:33,690 --> 00:01:35,580
数据集的特征属性
The features attribute of a Dataset
34
00:01:35,580 --> 00:01:37,470
为我们提供有关其专栏的更多信息。
gives us more information about its columns.
35
00:01:37,470 --> 00:01:40,020
特别是,我们可以在这里看到
In particular, we can see here
36
00:01:40,020 --> 00:01:41,400
它给了我们信件
it gives us the correspondence
37
00:01:41,400 --> 00:01:44,810
在标签的整数和名称之间。
between the integers and names for the labels.
38
00:01:44,810 --> 00:01:48,543
零代表不等价,一代表等价。
Zero stands for not equivalent and one for equivalent.
39
00:01:49,830 --> 00:01:52,020
要预处理数据集的所有元素,
To preprocess all the elements of our dataset,
40
00:01:52,020 --> 00:01:53,850
我们需要将它们标记化。
we need to tokenize them.
41
00:01:53,850 --> 00:01:56,160
看看视频 “预处理句子对”
Have a look at the video "Preprocess sentence pairs"
42
00:01:56,160 --> 00:01:57,570
复习一下,
for a refresher,
43
00:01:57,570 --> 00:01:59,430
但你只需要发送这两个句子
but you just have to send the two sentences
44
00:01:59,430 --> 00:02:02,733
带有一些额外的关键字参数的分词器。
to the tokenizer with some additional keyword arguments.
45
00:02:03,780 --> 00:02:06,600
这里我们表示最大长度为 128
Here we indicate a maximum length of 128
46
00:02:06,600 --> 00:02:08,820
和垫输入短于这个长度,
and pad inputs shorter than this length,
47
00:02:08,820 --> 00:02:10,420
截断更长的输入。
truncate inputs that are longer.
48
00:02:11,460 --> 00:02:13,470
我们把所有这些都放在一个 tokenize_function 中
We put all of this in a tokenize_function
49
00:02:13,470 --> 00:02:16,710
我们可以直接应用于数据集中的所有拆分
that we can directly apply to all the splits in our dataset
50
00:02:16,710 --> 00:02:17,710
用地图的方法。
with the map method.
51
00:02:18,840 --> 00:02:22,110
只要函数返回一个类似字典的对象,
As long as the function returns a dictionary-like object,
52
00:02:22,110 --> 00:02:24,300
map 方法将根据需要添加新列
the map method will add new columns as needed
53
00:02:24,300 --> 00:02:26,043
或更新现有的。
or update existing ones.
54
00:02:27,315 --> 00:02:28,830
加快预处理
To speed up preprocessing
55
00:02:28,830 --> 00:02:30,870
并利用我们的分词器这一事实
and take advantage of the fact our tokenizer
56
00:02:30,870 --> 00:02:32,040
由 Rust 支持,
is backed by Rust,
57
00:02:32,040 --> 00:02:34,770
感谢 Hugging Face Tokenizers 库,
thanks to the Hugging Face Tokenizers library,
58
00:02:34,770 --> 00:02:37,110
我们可以同时处理多个元素
we can process several elements at the same time
59
00:02:37,110 --> 00:02:40,710
到我们的 tokenize 函数,使用 batched=True 参数。
to our tokenize function, using the batched=True argument.
60
00:02:40,710 --> 00:02:42,120
由于分词器可以处理
Since the tokenizer can handle
61
00:02:42,120 --> 00:02:44,610
第一句话列表,第二句列表,
list of first sentences, list of second sentences,
62
00:02:44,610 --> 00:02:47,493
tokenize_function 不需要为此更改。
the tokenize_function does not need to change for this.
63
00:02:48,360 --> 00:02:51,180
你还可以将多处理与 map 方法一起使用。
You can also use multiprocessing with the map method.
64
00:02:51,180 --> 00:02:53,583
在链接的视频中查看其文档。
Check out its documentation in the linked video.
65
00:02:54,840 --> 00:02:57,990
完成后,我们几乎可以进行培训了。
Once this is done, we are almost ready for training.
66
00:02:57,990 --> 00:02:59,970
我们只是删除不再需要的列
We just remove the columns we don't need anymore
67
00:02:59,970 --> 00:03:02,190
使用 remove_columns 方法,
with the remove_columns method,
68
00:03:02,190 --> 00:03:03,750
将标签重命名为标签,
rename label to labels,
69
00:03:03,750 --> 00:03:05,790
因为来自 Hugging Face Transformers 的模型
since the models from the Hugging Face Transformers
70
00:03:05,790 --> 00:03:07,710
图书馆期望,
library expect that,
71
00:03:07,710 --> 00:03:10,470
并将输出格式设置为我们想要的后端,
and set the output format to our desired backend,
72
00:03:10,470 --> 00:03:12,053
火炬、TensorFlow 或 NumPy。
Torch, TensorFlow or NumPy.
73
00:03:13,440 --> 00:03:16,800
如果需要,我们还可以生成一个简短的数据集样本
If needed, we can also generate a short sample of a dataset
74
00:03:16,800 --> 00:03:18,000
使用选择方法。
using the select method.
75
00:03:20,211 --> 00:03:22,961
(滑动嗖嗖声)
(slide whooshes)
| course/subtitles/zh-CN/19_hugging-face-datasets-overview-(pytorch).srt/0 | {
"file_path": "course/subtitles/zh-CN/19_hugging-face-datasets-overview-(pytorch).srt",
"repo_id": "course",
"token_count": 4086
} | 175 |
1
00:00:00,195 --> 00:00:01,426
(屏幕呼啸)
(screen whooshing)
2
00:00:01,426 --> 00:00:02,614
(贴纸弹出)
(sticker popping)
3
00:00:02,614 --> 00:00:06,150
(屏幕呼啸)
(screen whooshing)
4
00:00:06,150 --> 00:00:08,430
- 加载自定义数据集。
- Loading a custom dataset.
5
00:00:08,430 --> 00:00:09,750
尽管 Hugging Face Hub 上承载了
Although the HuggingFace Hub hosts
6
00:00:09,750 --> 00:00:11,730
超过一千个公共数据集,
over a thousand public datasets,
7
00:00:11,730 --> 00:00:12,930
你可能仍然需要经常处理存储在你的笔记本电脑
you'll often need to work with data
8
00:00:12,930 --> 00:00:15,900
或存储在远程服务器上的数据。
that is stored on your laptop or some remote server.
9
00:00:15,900 --> 00:00:18,060
在本视频中,我们将探讨如何利用 Datasets 库
In this video, we'll explore how the Datasets library
10
00:00:18,060 --> 00:00:20,310
加载 Hugging Face Hub 以外
can be used to load datasets that aren't available
11
00:00:20,310 --> 00:00:21,510
的数据集。
on the Hugging Face Hub.
12
00:00:22,980 --> 00:00:25,290
正如你在此表中所见,Datasets 库
As you can see in this table, the Datasets library
13
00:00:25,290 --> 00:00:26,700
提供了几个内置脚本
provides several in-built scripts
14
00:00:26,700 --> 00:00:29,370
以多种格式加载数据集。
to load datasets in several formats.
15
00:00:29,370 --> 00:00:31,200
要以其中一种格式加载数据集,
To load a dataset in one of these formats,
16
00:00:31,200 --> 00:00:32,730
你只需要向 load_dataset 函数
you just need to provide the name of the format
17
00:00:32,730 --> 00:00:34,350
提供格式的名称,
to the load_dataset function,
18
00:00:34,350 --> 00:00:35,790
并且连同 data_files 参数一起传入
along with a data_files argument
19
00:00:35,790 --> 00:00:37,610
该参数指向一个或多个文件路径或 URL。
that points to one or more filepaths or URLs.
20
00:00:40,350 --> 00:00:43,590
要查看实际效果,让我们从加载 CSV 文件开始。
To see this in action, let's start by loading a CSV file.
21
00:00:43,590 --> 00:00:45,960
在这个例子中,我们首先下载一个数据集
In this example, we first download a dataset
22
00:00:45,960 --> 00:00:48,963
该数据集是来自 UCI 机器学习库的葡萄酒质量数据。
about wine quality from the UCI machine learning repository.
23
00:00:50,220 --> 00:00:52,590
由于这是一个 CSV 文件,因此我们指定
Since this is a CSV file, we then specify
24
00:00:52,590 --> 00:00:53,943
CSV 加载脚本。
the CSV loading script.
25
00:00:55,320 --> 00:00:57,570
现在,这个脚本需要知道我们的数据在哪里,
Now, this script needs to know where our data is located,
26
00:00:57,570 --> 00:00:58,650
所以我们提供文件名
so we provide the filename
27
00:00:58,650 --> 00:01:00,483
作为 data_files 参数的一部分。
as part of the data_files argument.
28
00:01:01,860 --> 00:01:03,360
并且加载脚本还允许你
And the loading script also allows you
29
00:01:03,360 --> 00:01:05,040
传递几个关键字参数,
to pass several keyword arguments,
30
00:01:05,040 --> 00:01:06,750
所以在这里我们也指定了
so here we've also specified
31
00:01:06,750 --> 00:01:09,030
分号作为分隔符。
that the separator is a semi-colon.
32
00:01:09,030 --> 00:01:10,380
这样,我们就可以看到数据集
And with that, we can see the dataset
33
00:01:10,380 --> 00:01:13,020
作为 DatasetDict 对象自动加载,
is loaded automatically as a DatasetDict object,
34
00:01:13,020 --> 00:01:15,920
CSV 文件中的每一列都代表一个特征。
with each column in the CSV file represented as a feature.
35
00:01:17,610 --> 00:01:20,280
如果你的数据集位于 GitHub 等远程服务器上
If your dataset is located on some remote server like GitHub
36
00:01:20,280 --> 00:01:22,050
或其他一些数据仓库,
or some other repository,
37
00:01:22,050 --> 00:01:23,700
这个过程实际上非常相似。
the process is actually very similar.
38
00:01:23,700 --> 00:01:25,980
唯一的区别是现在 data_files 参数
The only difference is that now the data_files argument
39
00:01:25,980 --> 00:01:28,623
指向 URL 而不是本地文件路径。
points to a URL instead of a local filepath.
40
00:01:30,330 --> 00:01:33,270
现在让我们看一下加载原始文本文件。
Let's now take a look at loading raw text files.
41
00:01:33,270 --> 00:01:35,100
这种格式在 NLP 中很常见,
This format is quite common in NLP,
42
00:01:35,100 --> 00:01:36,750
你常常会发现书籍和戏剧
and you'll typically find books and plays
43
00:01:36,750 --> 00:01:39,393
只是一个包含原始文本的独立文件。
are just a single file with raw text inside.
44
00:01:40,410 --> 00:01:43,020
在这个例子中,我们有一个莎士比亚戏剧的文本文件
In this example, we have a text file of Shakespeare plays
45
00:01:43,020 --> 00:01:45,330
存储在 GitHub 仓库中。
that's stored on a GitHub repository.
46
00:01:45,330 --> 00:01:47,040
正如我们对 CSV 文件所做的那样,
And as we did for CSV files,
47
00:01:47,040 --> 00:01:49,020
我们只需选择文本加载脚本
we simply choose the text loading script
48
00:01:49,020 --> 00:01:51,423
并将 data_files 参数指向 URL。
and point the data_files argument to the URL.
49
00:01:52,260 --> 00:01:55,110
如你所见,这些文件是逐行处理的,
As you can see, these files are processed line-by-line,
50
00:01:55,110 --> 00:01:57,690
所以原始文本中的空行
so empty lines in the raw text are also represented
51
00:01:57,690 --> 00:01:58,953
也按照数据集中的一行表示。
as a row in the dataset.
52
00:02:00,810 --> 00:02:04,230
对于 JSON 文件,有两种主要格式需要了解。
For JSON files, there are two main formats to know about.
53
00:02:04,230 --> 00:02:06,060
第一个叫做 JSON 行,
The first one is called JSON Lines,
54
00:02:06,060 --> 00:02:09,510
文件中的每一行都是一个单独的 JSON 对象。
where every row in the file is a separate JSON object.
55
00:02:09,510 --> 00:02:11,100
对于这些文件,你可以通过选择 JSON 加载脚本
For these files, you can load the dataset
56
00:02:11,100 --> 00:02:13,020
来加载数据集
by selecting the JSON loading script
57
00:02:13,020 --> 00:02:16,143
并将 data_files 参数指向文件或 URL。
and pointing the data_files argument to the file or URL.
58
00:02:17,160 --> 00:02:19,410
在这个例子中,我们加载了一个多行 JSON 的文件
In this example, we've loaded a JSON lines files
59
00:02:19,410 --> 00:02:21,710
其内容基于 Stack Exchange 问题和答案。
based on Stack Exchange questions and answers.
60
00:02:23,490 --> 00:02:26,610
另一种格式是嵌套的 JSON 文件。
The other format is nested JSON files.
61
00:02:26,610 --> 00:02:29,100
这些文件基本上看起来像一本巨大的字典,
These files basically look like one huge dictionary,
62
00:02:29,100 --> 00:02:31,200
所以 load_dataset 函数允许你指定
so the load_dataset function allow you to specify
63
00:02:31,200 --> 00:02:32,733
要加载哪个特定关键词。
which specific key to load.
64
00:02:33,630 --> 00:02:35,910
例如,用于问答的 SQuAD 数据集有它的格式,
For example, the SQuAD dataset for question and answering
65
00:02:35,910 --> 00:02:38,340
我们可以通过指定我们感兴趣的数据字段
has its format, and we can load it by specifying
66
00:02:38,340 --> 00:02:40,340
我们对 data 字段感兴趣。
that we're interested in the data field.
67
00:02:41,400 --> 00:02:42,780
最后要和大家分享的内容是
There is just one last thing to mention
68
00:02:42,780 --> 00:02:44,910
关于所有这些加载脚本。
about all of these loading scripts.
69
00:02:44,910 --> 00:02:46,410
你可以有不止一次数据切分,
You can have more than one split,
70
00:02:46,410 --> 00:02:49,080
你可以通过将数据文件视为字典来加载它们,
you can load them by treating data files as a dictionary,
71
00:02:49,080 --> 00:02:52,140
并将每个拆分的名称映射到其对应的文件。
and map each split name to its corresponding file.
72
00:02:52,140 --> 00:02:53,970
其他一切都保持完全不变
Everything else stays completely unchanged
73
00:02:53,970 --> 00:02:55,350
你可以看到一个例子,
and you can see an example of loading
74
00:02:55,350 --> 00:02:58,283
加载此 SQuAD 的训练和验证分解步骤都在这里。
both the training and validation splits for this SQuAD here.
75
00:02:59,550 --> 00:03:02,310
这样,你现在可以加载来自笔记本电脑的数据集,来自 Hugging Face Hub 的数据集,
And with that, you can now load datasets from your laptop,
76
00:03:02,310 --> 00:03:04,653
或来自任何其他地方的数据集。
the Hugging Face Hub, or anywhere else want.
77
00:03:06,277 --> 00:03:09,194
(屏幕呼啸)
(screen whooshing)
| course/subtitles/zh-CN/35_loading-a-custom-dataset.srt/0 | {
"file_path": "course/subtitles/zh-CN/35_loading-a-custom-dataset.srt",
"repo_id": "course",
"token_count": 4464
} | 176 |
1
00:00:00,125 --> 00:00:05,125
(空气呼啸)
(air whooshing)
2
00:00:05,190 --> 00:00:06,720
- 你是在正确的地方
- You are at the right place
3
00:00:06,720 --> 00:00:10,464
如果你想了解什么是字节对编码(BPE)算法
if you want to understand what the Byte Pair Encoding
4
00:00:10,464 --> 00:00:13,263
子词分词化算法是,
subword tokenization algorithm is,
5
00:00:14,160 --> 00:00:15,505
如何训练它
how to train it
6
00:00:15,505 --> 00:00:17,790
以及文本的分词化是如何完成的
and how the tokenization of a text is done
7
00:00:17,790 --> 00:00:19,107
用这个算法。
with this algorithm.
8
00:00:21,417 --> 00:00:22,920
BPE 算法
The BPE algorithm
9
00:00:22,920 --> 00:00:26,820
最初被提出作为文本压缩算法
was initially proposed as a text compression algorithm
10
00:00:26,820 --> 00:00:28,770
但它也非常适合
but it is also very well suited
11
00:00:28,770 --> 00:00:31,143
作为你的语言模型的分词器。
as a tokenizer for your language models.
12
00:00:32,910 --> 00:00:34,890
BPE 的思想是分词
The idea of BPE is to divide words
13
00:00:34,890 --> 00:00:36,933
进入一系列 “子词单元”
into a sequence of'subword units'
14
00:00:38,100 --> 00:00:41,970
其是参考语料库中频繁出现的单位
which are units that appear frequently in a reference corpus
15
00:00:41,970 --> 00:00:44,613
也是我们用来训练它的语料库。
which is, the corpus we used to train it.
16
00:00:46,701 --> 00:00:49,083
BPE 分词器是如何训练的?
How is a BPE tokenizer trained?
17
00:00:50,100 --> 00:00:53,340
首先,我们必须得到一个文本语料库。
First of all, we have to get a corpus of texts.
18
00:00:53,340 --> 00:00:56,940
我们不会在这个原始文本上训练我们的分词器
We will not train our tokenizer on this raw text
19
00:00:56,940 --> 00:00:59,490
但我们首先将其规范化
but we will first normalize it
20
00:00:59,490 --> 00:01:00,873
然后对其进行预标记。
then pre-tokenize it.
21
00:01:01,890 --> 00:01:03,240
作为预标记化
As the pre-tokenization
22
00:01:03,240 --> 00:01:05,790
将文本分成单词列表,
divides the text into a list of words,
23
00:01:05,790 --> 00:01:08,400
我们可以用另一种方式表示我们的语料库
we can represent our corpus in another way
24
00:01:08,400 --> 00:01:10,350
通过收集相同的词
by gathering together the same words
25
00:01:10,350 --> 00:01:12,450
并通过维护一个柜,
and by maintaining a counter,
26
00:01:12,450 --> 00:01:14,223
这里用蓝色表示。
here represented in blue.
27
00:01:17,340 --> 00:01:19,860
要了解训练的工作原理,
To understand how the training works,
28
00:01:19,860 --> 00:01:23,730
我们认为这个小语料库由以下单词组成:
we consider this toy corpus composed of the following words:
29
00:01:23,730 --> 00:01:28,203
huggingface, hugging, hug, hugger, 等
huggingface, hugging, hug, hugger, etc.
30
00:01:29,100 --> 00:01:32,640
BPE 是一种从初始词汇表开始的算法
BPE is an algorithm that starts with an initial vocabulary
31
00:01:32,640 --> 00:01:35,583
然后将其增加到所需的大小。
and then increases it to the desired size.
32
00:01:36,450 --> 00:01:38,460
为了建立初始词汇表,
To build the initial vocabulary,
33
00:01:38,460 --> 00:01:41,550
我们从分离语料库的每个词开始
we start by separating each word of the corpus
34
00:01:41,550 --> 00:01:44,253
组成它们的基本单元列表,
into a list of elementary units that compose them,
35
00:01:45,210 --> 00:01:47,013
在这里, 字符。
here, the characters.
36
00:01:50,850 --> 00:01:54,310
我们在词汇表中列出所有出现的字符
We list in our vocabulary all the characters that appear
37
00:01:55,218 --> 00:01:58,053
这将构成我们最初的词汇表。
and that will constitute our initial vocabulary.
38
00:02:00,420 --> 00:02:02,523
现在让我们看看如何增加它。
Let's now see how to increase it.
39
00:02:05,520 --> 00:02:08,250
我们回到我们拆分的语料库,
We return to our split corpus,
40
00:02:08,250 --> 00:02:11,340
我们将逐字逐句
we will go through the words one by one
41
00:02:11,340 --> 00:02:14,313
并计算 token 对的所有出现次数。
and count all the occurrences of token pairs.
42
00:02:15,450 --> 00:02:18,397
第一对由标记 “h” 和 “u” 组成,
The first pair is composed of the token 'h' and 'u',
43
00:02:20,130 --> 00:02:23,067
第二个 "u" 和 "g",
the second 'u' and 'g',
44
00:02:23,067 --> 00:02:26,253
我们继续这样,直到我们有完整的列表。
and we continue like that until we have the complete list.
45
00:02:35,580 --> 00:02:37,724
一旦我们知道所有的对
Once we know all the pairs
46
00:02:37,724 --> 00:02:40,140
以及它们出现的频率,
and their frequency of appearance,
47
00:02:40,140 --> 00:02:42,940
我们将选择出现频率最高的那个。
we will choose the one that appears the most frequently.
48
00:02:44,220 --> 00:02:47,697
这是由字母 “l” 和 “e” 组成的一对。
Here it is the pair composed of the letters 'l' and 'e'.
49
00:02:51,930 --> 00:02:53,590
我们注意到我们的第一个合并规则
We note our first merging rule
50
00:02:54,593 --> 00:02:57,243
然后我们将新 token 添加到我们的词汇表中。
and we add the new token to our vocabulary.
51
00:03:00,330 --> 00:03:04,260
然后我们可以将此合并规则应用于我们的拆分。
We can then apply this merging rule to our splits.
52
00:03:04,260 --> 00:03:07,350
你可以看到我们已经合并了所有的 token 对
You can see that we have merged all the pairs of tokens
53
00:03:07,350 --> 00:03:09,793
由标记 “l” 和 “e” 组成。
composed of the tokens 'l' and 'e'.
54
00:03:14,008 --> 00:03:18,150
现在,我们只需要重现相同的步骤
And now, we just have to reproduce the same steps
55
00:03:18,150 --> 00:03:19,353
与我们的新拆分。
with our new splits.
56
00:03:21,750 --> 00:03:23,460
我们计算出现频率
We calculate the frequency of occurrence
57
00:03:23,460 --> 00:03:25,023
对每对 token ,
of each pair of tokens,
58
00:03:27,990 --> 00:03:30,603
我们选择频率最高的一对,
we select the pair with the highest frequency,
59
00:03:32,190 --> 00:03:34,083
我们在我们的合并规则中注意到它,
we note it in our merge rules,
60
00:03:36,000 --> 00:03:39,360
我们将新的 token 添加到词汇表中
we add the new one token the vocabulary
61
00:03:39,360 --> 00:03:41,880
然后我们合并所有的 token 对
and then we merge all the pairs of tokens
62
00:03:41,880 --> 00:03:46,503
由标记 “le” 和 “a” 组成,进入我们的拆分。
composed of the token 'le' and 'a' into our splits.
63
00:03:50,323 --> 00:03:51,960
我们可以重复这个操作
And we can repeat this operation
64
00:03:51,960 --> 00:03:54,843
直到我们达到所需的词汇量。
until we reach the desired vocabulary size.
65
00:04:05,671 --> 00:04:10,671
在这里,当我们的词汇量达到 21 个 token 时,我们就停止了。
Here, we stopped when our vocabulary reached 21 tokens.
66
00:04:11,040 --> 00:04:13,920
我们现在可以看到,比起训练开始时
We can see now that the words of our corpus
67
00:04:13,920 --> 00:04:17,040
我们的语料库中的单词
are now divided into far fewer tokens
68
00:04:17,040 --> 00:04:20,280
被分成了更少的 token
than at the beginning of the training.
69
00:04:20,280 --> 00:04:21,720
而我们的算法
And that our algorithm
70
00:04:21,720 --> 00:04:24,990
学会了部首 “hug” 和 “learn”
has learned the radicals 'hug' and 'learn'
71
00:04:24,990 --> 00:04:27,537
以及动词结尾 “ing”。
and also the verbal ending 'ing'.
72
00:04:29,880 --> 00:04:32,160
现在我们已经学会了我们的词汇
Now that we have learned our vocabulary
73
00:04:32,160 --> 00:04:35,943
和合并规则,我们可以标记新文本。
and merging rules, we can tokenize new texts.
74
00:04:37,980 --> 00:04:39,210
例如,
For example,
75
00:04:39,210 --> 00:04:41,160
如果我们想标记 “hugs” 这个词,
if we want to tokenize the word 'hugs',
76
00:04:42,960 --> 00:04:46,680
首先我们将它分成基本单元
first we'll divide it into elementary units
77
00:04:46,680 --> 00:04:48,843
所以它变成了一个字符序列。
so it became a sequence of characters.
78
00:04:50,040 --> 00:04:52,020
然后,我们将通过我们的合并规则
Then, we'll go through our merge rules
79
00:04:52,020 --> 00:04:54,690
直到我们有一个我们可以应用。
until we have one we can apply.
80
00:04:54,690 --> 00:04:57,930
在这里,我们可以合并字母 “h” 和 “u”。
Here, we can merge the letters 'h' and 'u'.
81
00:04:57,930 --> 00:05:01,467
在这里,我们可以合并 2 个 token 以获得新 token “hug”。
And here, we can merge 2 tokens to get the new token 'hug'.
82
00:05:02,400 --> 00:05:05,760
当我们到达合并规则的末尾时,
When we get to the end of our merge rules,
83
00:05:05,760 --> 00:05:07,563
分词化完成。
the tokenization is finished.
84
00:05:10,650 --> 00:05:11,727
就是这样。
And that's it.
85
00:05:12,846 --> 00:05:14,850
我希望现在的 BPE 算法
I hope that now the BPE algorithm
86
00:05:14,850 --> 00:05:16,413
对你而言不再是秘密!
has no more secret for you!
87
00:05:17,739 --> 00:05:20,406
(空气呼啸)
(air whooshing)
| course/subtitles/zh-CN/51_byte-pair-encoding-tokenization.srt/0 | {
"file_path": "course/subtitles/zh-CN/51_byte-pair-encoding-tokenization.srt",
"repo_id": "course",
"token_count": 4768
} | 177 |
# How to add a new dataset
Add datasets directly to the 🤗 Hugging Face Hub!
You can share your dataset on https://huggingface.co/datasets directly using your account, see the documentation:
* [Create a dataset and upload files on the website](https://huggingface.co/docs/datasets/upload_dataset)
* [Advanced guide using the CLI](https://huggingface.co/docs/datasets/share)
| datasets/ADD_NEW_DATASET.md/0 | {
"file_path": "datasets/ADD_NEW_DATASET.md",
"repo_id": "datasets",
"token_count": 113
} | 178 |
# Differences between Dataset and IterableDataset
There are two types of dataset objects, a [`Dataset`] and an [`IterableDataset`].
Whichever type of dataset you choose to use or create depends on the size of the dataset.
In general, an [`IterableDataset`] is ideal for big datasets (think hundreds of GBs!) due to its lazy behavior and speed advantages, while a [`Dataset`] is great for everything else.
This page will compare the differences between a [`Dataset`] and an [`IterableDataset`] to help you pick the right dataset object for you.
## Downloading and streaming
When you have a regular [`Dataset`], you can access it using `my_dataset[0]`. This provides random access to the rows.
Such datasets are also called "map-style" datasets.
For example you can download ImageNet-1k like this and access any row:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train") # downloads the full dataset
print(imagenet[0])
```
But one caveat is that you must have the entire dataset stored on your disk or in memory, which blocks you from accessing datasets bigger than the disk.
Because it can become inconvenient for big datasets, there exists another type of dataset, the [`IterableDataset`].
When you have an `IterableDataset`, you can access it using a `for` loop to load the data progressively as you iterate over the dataset.
This way, only a small fraction of examples is loaded in memory, and you don't write anything on disk.
For example, you can stream the ImageNet-1k dataset without downloading it on disk:
```python
from datasets import load_dataset
imagenet = load_dataset("imagenet-1k", split="train", streaming=True) # will start loading the data when iterated over
for example in imagenet:
print(example)
break
```
Streaming can read online data without writing any file to disk.
For example, you can stream datasets made out of multiple shards, each of which is hundreds of gigabytes like [C4](https://huggingface.co/datasets/c4), [OSCAR](https://huggingface.co/datasets/oscar) or [LAION-2B](https://huggingface.co/datasets/laion/laion2B-en).
Learn more about how to stream a dataset in the [Dataset Streaming Guide](./stream).
This is not the only difference though, because the "lazy" behavior of an `IterableDataset` is also present when it comes to dataset creation and processing.
## Creating map-style datasets and iterable datasets
You can create a [`Dataset`] using lists or dictionaries, and the data is entirely converted to Arrow so you can easily access any row:
```python
my_dataset = Dataset.from_dict({"col_1": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]})
print(my_dataset[0])
```
To create an `IterableDataset` on the other hand, you must provide a "lazy" way to load the data.
In Python, we generally use generator functions. These functions `yield` one example at a time, which means you can't access a row by slicing it like a regular `Dataset`:
```python
def my_generator(n):
for i in range(n):
yield {"col_1": i}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs={"n": 10})
for example in my_iterable_dataset:
print(example)
break
```
## Loading local files entirely and progressively
It is possible to convert local or remote data files to an Arrow [`Dataset`] using [`load_dataset`]:
```python
data_files = {"train": ["path/to/data.csv"]}
my_dataset = load_dataset("csv", data_files=data_files, split="train")
print(my_dataset[0])
```
However, this requires a conversion step from CSV to Arrow format, which takes time and disk space if your dataset is big.
To save disk space and skip the conversion step, you can define an `IterableDataset` by streaming from the local files directly.
This way, the data is read progressively from the local files as you iterate over the dataset:
```python
data_files = {"train": ["path/to/data.csv"]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
for example in my_iterable_dataset: # this reads the CSV file progressively as you iterate over the dataset
print(example)
break
```
Many file formats are supported, like CSV, JSONL, and Parquet, as well as image and audio files.
You can find more information in the corresponding guides for loading [tabular](./tabular_load), [text](./nlp_load), [vision](./image_load), and [audio](./audio_load) datasets.
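The same pattern applies to the other formats. For instance, here is a sketch (with hypothetical file names) that streams several local Parquet shards progressively:
```python
data_files = {"train": [f"path/to/shard_{i}.parquet" for i in range(4)]}  # hypothetical shard names
my_iterable_dataset = load_dataset("parquet", data_files=data_files, split="train", streaming=True)
for example in my_iterable_dataset:  # the Parquet shards are read progressively, nothing is converted or cached on disk
    print(example)
    break
```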
## Eager data processing and lazy data processing
When you process a [`Dataset`] object using [`Dataset.map`], the entire dataset is processed immediately and returned.
This is similar to how `pandas` works for example.
```python
my_dataset = my_dataset.map(process_fn) # process_fn is applied on all the examples of the dataset
print(my_dataset[0])
```
On the other hand, due to the "lazy" nature of an `IterableDataset`, calling [`IterableDataset.map`] does not apply your `map` function over the full dataset.
Instead, your `map` function is applied on-the-fly.
Because of that, you can chain multiple processing steps and they will all run at once when you start iterating over the dataset:
```python
my_iterable_dataset = my_iterable_dataset.map(process_fn_1)
my_iterable_dataset = my_iterable_dataset.filter(filter_fn)
my_iterable_dataset = my_iterable_dataset.map(process_fn_2)
# process_fn_1, filter_fn and process_fn_2 are applied on-the-fly when iterating over the dataset
for example in my_iterable_dataset:
print(example)
break
```
## Exact and fast approximate shuffling
When you shuffle a [`Dataset`] using [`Dataset.shuffle`], you apply an exact shuffling of the dataset.
It works by taking a list of indices `[0, 1, 2, ... len(my_dataset) - 1]` and shuffling this list.
Then, accessing `my_dataset[0]` returns the row and index defined by the first element of the indices mapping that has been shuffled:
```python
my_dataset = my_dataset.shuffle(seed=42)
print(my_dataset[0])
```
Since we don't have random access to the rows in the case of an `IterableDataset`, we can't use a shuffled list of indices and access a row at an arbitrary position.
This prevents the use of exact shuffling.
Instead, a fast approximate shuffling is used in [`IterableDataset.shuffle`].
It uses a shuffle buffer to sample random examples iteratively from the dataset.
Since the dataset is still read iteratively, it provides excellent speed performance:
```python
my_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in my_iterable_dataset:
print(example)
break
```
But using a shuffle buffer is not enough to provide a satisfactory shuffling for machine learning model training. So [`IterableDataset.shuffle`] also shuffles the dataset shards if your dataset is made of multiple files or sources:
```python
# Stream from the internet
my_iterable_dataset = load_dataset("deepmind/code_contests", split="train", streaming=True)
my_iterable_dataset.n_shards # 39
# Stream from local files
data_files = {"train": [f"path/to/data_{i}.csv" for i in range(1024)]}
my_iterable_dataset = load_dataset("csv", data_files=data_files, split="train", streaming=True)
my_iterable_dataset.n_shards # 1024
# From a generator function
def my_generator(n, sources):
for source in sources:
for example_id_for_current_source in range(n):
yield {"example_id": f"{source}_{example_id_for_current_source}"}
gen_kwargs = {"n": 10, "sources": [f"path/to/data_{i}" for i in range(1024)]}
my_iterable_dataset = IterableDataset.from_generator(my_generator, gen_kwargs=gen_kwargs)
my_iterable_dataset.n_shards # 1024
```
## Speed differences
Regular [`Dataset`] objects are based on Arrow which provides fast random access to the rows.
Thanks to memory mapping and the fact that Arrow is an in-memory format, reading data from disk doesn't involve expensive system calls or deserialization.
Iterating with a `for` loop is even faster, because it reads contiguous Arrow record batches.
However as soon as your [`Dataset`] has an indices mapping (via [`Dataset.shuffle`] for example), the speed can become 10x slower.
This is because there is an extra step to get the row index to read using the indices mapping, and most importantly, you aren't reading contiguous chunks of data anymore.
To restore the speed, you'd need to rewrite the entire dataset on your disk again using [`Dataset.flatten_indices`], which removes the indices mapping.
This may take a lot of time depending on the size of your dataset though:
```python
my_dataset[0] # fast
my_dataset = my_dataset.shuffle(seed=42)
my_dataset[0] # up to 10x slower
my_dataset = my_dataset.flatten_indices() # rewrite the shuffled dataset on disk as contiguous chunks of data
my_dataset[0] # fast again
```
In this case, we recommend switching to an [`IterableDataset`] and leveraging its fast approximate shuffling method [`IterableDataset.shuffle`].
It only shuffles the shards order and adds a shuffle buffer to your dataset, which keeps the speed of your dataset optimal.
You can also reshuffle the dataset easily:
```python
for example in my_iterable_dataset:  # fast
    pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=42, buffer_size=100)
for example in shuffled_iterable_dataset:  # as fast as before
    pass
shuffled_iterable_dataset = my_iterable_dataset.shuffle(seed=1337, buffer_size=100)  # reshuffling using another seed is instantaneous
for example in shuffled_iterable_dataset:  # still as fast as before
    pass
```
If you're using your dataset on multiple epochs, the effective seed to shuffle the shards order in the shuffle buffer is `seed + epoch`.
It makes it easy to reshuffle a dataset between epochs:
```python
for epoch in range(n_epochs):
my_iterable_dataset.set_epoch(epoch)
for example in my_iterable_dataset: # fast + reshuffled at each epoch using `effective_seed = seed + epoch`
pass
```
## Switch from map-style to iterable
If you want to benefit from the "lazy" behavior of an [`IterableDataset`] or their speed advantages, you can switch your map-style [`Dataset`] to an [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset()
```
If you want to shuffle your dataset or [use it with a PyTorch DataLoader](./use_with_pytorch#stream-data), we recommend generating a sharded [`IterableDataset`]:
```python
my_iterable_dataset = my_dataset.to_iterable_dataset(num_shards=1024)
my_iterable_dataset.n_shards # 1024
```
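As a minimal sketch (assuming PyTorch is installed), each DataLoader worker can then stream from its own subset of those shards:
```python
from torch.utils.data import DataLoader
dataloader = DataLoader(my_iterable_dataset, batch_size=32, num_workers=4)  # the 1024 shards are split across the 4 workers
for batch in dataloader:
    break  # batches are assembled on the fly from the streamed shards
```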
| datasets/docs/source/about_mapstyle_vs_iterable.mdx/0 | {
"file_path": "datasets/docs/source/about_mapstyle_vs_iterable.mdx",
"repo_id": "datasets",
"token_count": 3261
} | 179 |
# Metrics
<Tip warning={true}>
Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
Metrics are important for evaluating a model's predictions. In the tutorial, you learned how to compute a metric over an entire evaluation set. You have also seen how to load a metric.
This guide will show you how to:
- Add predictions and references.
- Compute metrics using different methods.
- Write your own metric loading script.
## Add predictions and references
When you want to add model predictions and references to a [`Metric`] instance, you have two options:
- [`Metric.add`] adds a single `prediction` and `reference`.
- [`Metric.add_batch`] adds a batch of `predictions` and `references`.
Use [`Metric.add_batch`] by passing it your model predictions, and the references the model predictions should be evaluated against:
```py
>>> import datasets
>>> metric = datasets.load_metric('my_metric')
>>> for model_inputs, gold_references in evaluation_dataset:
... model_predictions = model(model_inputs)
... metric.add_batch(predictions=model_predictions, references=gold_references)
>>> final_score = metric.compute()
```
<Tip>
Metrics accepts various input formats (Python lists, NumPy arrays, PyTorch tensors, etc.) and converts them to an appropriate format for storage and computation.
</Tip>
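If you are accumulating one example at a time instead, a minimal sketch with [`Metric.add`] looks like this (the evaluation loop itself is illustrative):
```py
>>> for model_input, gold_reference in evaluation_dataset:
...     model_prediction = model(model_input)
...     metric.add(prediction=model_prediction, reference=gold_reference)
>>> final_score = metric.compute()
```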
## Compute scores
The most straightforward way to calculate a metric is to call [`Metric.compute`]. But some metrics have additional arguments that allow you to modify the metrics behavior.
Let's load the [SacreBLEU](https://huggingface.co/metrics/sacrebleu) metric, and compute it with a different smoothing method.
1. Load the SacreBLEU metric:
```py
>>> import datasets
>>> metric = datasets.load_metric('sacrebleu')
```
2. Inspect the different argument methods for computing the metric:
```py
>>> print(metric.inputs_description)
Produces BLEU scores along with its sufficient statistics
from a source against one or more references.
Args:
predictions: The system stream (a sequence of segments).
references: A list of one or more reference streams (each a sequence of segments).
smooth_method: The smoothing method to use. (Default: 'exp').
smooth_value: The smoothing value. Only valid for 'floor' and 'add-k'. (Defaults: floor: 0.1, add-k: 1).
tokenize: Tokenization method to use for BLEU. If not provided, defaults to 'zh' for Chinese, 'ja-mecab' for Japanese and '13a' (mteval) otherwise.
lowercase: Lowercase the data. If True, enables case-insensitivity. (Default: False).
force: Insist that your tokenized input is actually detokenized.
...
```
3. Compute the metric with the `floor` method, and a different `smooth_value`:
```py
>>> score = metric.compute(smooth_method="floor", smooth_value=0.2)
```
<a id='metric_script'></a>
## Custom metric loading script
Write a metric loading script to use your own custom metric (or one that is not on the Hub). Then you can load it as usual with [`load_metric`].
To help you get started, open the [SQuAD metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/squad/squad.py) and follow along.
<Tip>
Get jump started with our metric loading script [template](https://github.com/huggingface/datasets/blob/f9713d2e23813142a02f1b0e965095f528785cff/templates/new_metric_script.py)!
</Tip>
### Add metric attributes
Start by adding some information about your metric in [`Metric._info`]. The most important attributes you should specify are:
1. [`MetricInfo.description`] provides a brief description about your metric.
2. [`MetricInfo.citation`] contains a BibTex citation for the metric.
3. [`MetricInfo.inputs_description`] describes the expected inputs and outputs. It may also provide an example usage of the metric.
4. [`MetricInfo.features`] defines the name and type of the predictions and references.
After you've filled out all these fields in the template, it should look like the following example from the SQuAD metric script:
```py
class Squad(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
```
### Download metric files
If your metric needs to download, or retrieve local files, you will need to use the [`Metric._download_and_prepare`] method. For this example, let's examine the [BLEURT metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/bleurt/bleurt.py).
1. Provide a dictionary of URLs that point to the metric files:
```py
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
}
```
<Tip>
If the files are stored locally, provide a dictionary of path(s) instead of URLs.
</Tip>
2. [`Metric._download_and_prepare`] will take the URLs and download the metric files specified:
```py
def _download_and_prepare(self, dl_manager):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
"Using default BLEURT-Base checkpoint for sequence maximum length 128. "
"You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
)
self.config_name = "bleurt-base-128"
if self.config_name not in CHECKPOINT_URLS.keys():
raise KeyError(
f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
)
# download the model checkpoint specified by self.config_name and set up the scorer
model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[self.config_name])
self.scorer = score.BleurtScorer(os.path.join(model_path, self.config_name))
```
### Compute score
[`Metric._compute`] provides the actual instructions for how to compute a metric given the predictions and references. Now let's take a look at the [GLUE metric loading script](https://github.com/huggingface/datasets/blob/main/metrics/glue/glue.py).
1. Provide the functions for [`Metric._compute`] to calculate your metric:
```py
def simple_accuracy(preds, labels):
return (preds == labels).mean().item()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds).item()
return {
"accuracy": acc,
"f1": f1,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0].item()
spearman_corr = spearmanr(preds, labels)[0].item()
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
```
2. Create [`Metric._compute`] with instructions for what metric to calculate for each configuration:
```py
def _compute(self, predictions, references):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(references, predictions)}
elif self.config_name == "stsb":
return pearson_and_spearman(predictions, references)
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_f1(predictions, references)
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"You should supply a configuration name selected in "
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
)
```
### Test
Once you're finished writing your metric loading script, try to load it locally:
```py
>>> from datasets import load_metric
>>> metric = load_metric('PATH/TO/MY/SCRIPT.py')
```
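If your script follows the SQuAD-style features shown earlier, you can then sanity-check it end-to-end on a couple of toy examples (the ids and answers below are made up):
```py
>>> toy_predictions = [{"id": "0", "prediction_text": "1976"}]
>>> toy_references = [{"id": "0", "answers": {"text": ["1976"], "answer_start": [97]}}]
>>> metric.compute(predictions=toy_predictions, references=toy_references)
```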
| datasets/docs/source/how_to_metrics.mdx/0 | {
"file_path": "datasets/docs/source/how_to_metrics.mdx",
"repo_id": "datasets",
"token_count": 3350
} | 180 |
# Loading methods
Methods for listing and loading datasets and metrics:
## Datasets
[[autodoc]] datasets.list_datasets
[[autodoc]] datasets.load_dataset
[[autodoc]] datasets.load_from_disk
[[autodoc]] datasets.load_dataset_builder
[[autodoc]] datasets.get_dataset_config_names
[[autodoc]] datasets.get_dataset_infos
[[autodoc]] datasets.get_dataset_split_names
[[autodoc]] datasets.inspect_dataset
## Metrics
<Tip warning={true}>
Metrics is deprecated in 🤗 Datasets. To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets.
</Tip>
[[autodoc]] datasets.list_metrics
[[autodoc]] datasets.load_metric
[[autodoc]] datasets.inspect_metric
## From files
Configurations used to load data files.
They are used when loading local files or a dataset repository:
- local files: `load_dataset("parquet", data_dir="path/to/data/dir")`
- dataset repository: `load_dataset("allenai/c4")`
You can pass arguments to `load_dataset` to configure data loading.
For example you can specify the `sep` parameter to define the [`~datasets.packaged_modules.csv.CsvConfig`] that is used to load the data:
```python
load_dataset("csv", data_dir="path/to/data/dir", sep="\t")
```
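Similarly, nested JSON files can be loaded by passing the `field` parameter of [`~datasets.packaged_modules.json.JsonConfig`] (the `"data"` key below is a hypothetical layout):
```python
load_dataset("json", data_dir="path/to/data/dir", field="data")
```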
### Text
[[autodoc]] datasets.packaged_modules.text.TextConfig
[[autodoc]] datasets.packaged_modules.text.Text
### CSV
[[autodoc]] datasets.packaged_modules.csv.CsvConfig
[[autodoc]] datasets.packaged_modules.csv.Csv
### JSON
[[autodoc]] datasets.packaged_modules.json.JsonConfig
[[autodoc]] datasets.packaged_modules.json.Json
### Parquet
[[autodoc]] datasets.packaged_modules.parquet.ParquetConfig
[[autodoc]] datasets.packaged_modules.parquet.Parquet
### Arrow
[[autodoc]] datasets.packaged_modules.arrow.ArrowConfig
[[autodoc]] datasets.packaged_modules.arrow.Arrow
### SQL
[[autodoc]] datasets.packaged_modules.sql.SqlConfig
[[autodoc]] datasets.packaged_modules.sql.Sql
### Images
[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolderConfig
[[autodoc]] datasets.packaged_modules.imagefolder.ImageFolder
### Audio
[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolderConfig
[[autodoc]] datasets.packaged_modules.audiofolder.AudioFolder
### WebDataset
[[autodoc]] datasets.packaged_modules.webdataset.WebDataset
| datasets/docs/source/package_reference/loading_methods.mdx/0 | {
"file_path": "datasets/docs/source/package_reference/loading_methods.mdx",
"repo_id": "datasets",
"token_count": 809
} | 181 |
# Use with JAX
This document is a quick introduction to using `datasets` with JAX, with a particular focus on how to get
`jax.Array` objects out of our datasets, and how to use them to train JAX models.
<Tip>
`jax` and `jaxlib` are required to reproduce the code in this document, so please make sure you
install them with `pip install datasets[jax]`.
</Tip>
## Dataset format
By default, datasets return regular Python objects: integers, floats, strings, lists, etc.
Note that string and binary objects are left unchanged when formatting for JAX, since JAX only supports numbers.
To get JAX arrays (numpy-like) instead, you can set the format of the dataset to `jax`:
```py
>>> from datasets import Dataset
>>> data = [[1, 2], [3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("jax")
>>> ds[0]
{'data': DeviceArray([1, 2], dtype=int32)}
>>> ds[:2]
{'data': DeviceArray([
[1, 2],
[3, 4]], dtype=int32)}
```
<Tip>
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to JAX arrays.
</Tip>
Note that the exact same procedure applies to `DatasetDict` objects, so that
when setting the format of a `DatasetDict` to `jax`, all the `Dataset`s there
will be formatted as `jax`:
```py
>>> from datasets import DatasetDict
>>> data = {"train": {"data": [[1, 2], [3, 4]]}, "test": {"data": [[5, 6], [7, 8]]}}
>>> dds = DatasetDict.from_dict(data)
>>> dds = dds.with_format("jax")
>>> dds["train"][:2]
{'data': DeviceArray([
[1, 2],
[3, 4]], dtype=int32)}
```
Another thing you'll need to take into consideration is that the formatting is not applied
until you actually access the data. So if you want to get a JAX array out of a dataset,
you'll need to access the data first, otherwise the format will remain the same.
Finally, to load the data on the device of your choice, you can specify the `device` argument,
but note that `jaxlib.xla_extension.Device` is not supported as it's not serializable with either
`pickle` or `dill`, so you'll need to use its string identifier instead:
```py
>>> import jax
>>> from datasets import Dataset
>>> data = [[1, 2], [3, 4]]
>>> ds = Dataset.from_dict({"data": data})
>>> device = str(jax.devices()[0]) # Not casting to `str` before passing it to `with_format` will raise a `ValueError`
>>> ds = ds.with_format("jax", device=device)
>>> ds[0]
{'data': DeviceArray([1, 2], dtype=int32)}
>>> ds[0]["data"].device()
TFRT_CPU_0
>>> assert ds[0]["data"].device() == jax.devices()[0]
True
```
Note that if the `device` argument is not provided to `with_format` then it will use the default
device which is `jax.devices()[0]`.
## N-dimensional arrays
If your dataset consists of N-dimensional arrays, you will see that by default they are considered as nested lists.
In particular, a JAX formatted dataset outputs a `DeviceArray` object, which is a numpy-like array, so it does not
need the [`Array`] feature type to be specified as opposed to PyTorch or TensorFlow formatters.
```py
>>> from datasets import Dataset
>>> data = [[[1, 2],[3, 4]], [[5, 6],[7, 8]]]
>>> ds = Dataset.from_dict({"data": data})
>>> ds = ds.with_format("jax")
>>> ds[0]
{'data': DeviceArray([[1, 2],
[3, 4]], dtype=int32)}
```
## Other feature types
[`ClassLabel`] data is properly converted to arrays:
```py
>>> from datasets import Dataset, Features, ClassLabel
>>> labels = [0, 0, 1]
>>> features = Features({"label": ClassLabel(names=["negative", "positive"])})
>>> ds = Dataset.from_dict({"label": labels}, features=features)
>>> ds = ds.with_format("jax")
>>> ds[:3]
{'label': DeviceArray([0, 0, 1], dtype=int32)}
```
String and binary objects are unchanged, since JAX only supports numbers.
The [`Image`] and [`Audio`] feature types are also supported.
<Tip>
To use the [`Image`] feature type, you'll need to install the `vision` extra as
`pip install datasets[vision]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Image
>>> images = ["path/to/image.png"] * 10
>>> features = Features({"image": Image()})
>>> ds = Dataset.from_dict({"image": images}, features=features)
>>> ds = ds.with_format("jax")
>>> ds[0]["image"].shape
(512, 512, 3)
>>> ds[0]
{'image': DeviceArray([[[ 255, 255, 255],
[ 255, 255, 255],
...,
[ 255, 255, 255],
[ 255, 255, 255]]], dtype=uint8)}
>>> ds[:2]["image"].shape
(2, 512, 512, 3)
>>> ds[:2]
{'image': DeviceArray([[[[ 255, 255, 255],
[ 255, 255, 255],
...,
[ 255, 255, 255],
[ 255, 255, 255]]]], dtype=uint8)}
```
<Tip>
To use the [`Audio`] feature type, you'll need to install the `audio` extra as
`pip install datasets[audio]`.
</Tip>
```py
>>> from datasets import Dataset, Features, Audio
>>> audio = ["path/to/audio.wav"] * 10
>>> features = Features({"audio": Audio()})
>>> ds = Dataset.from_dict({"audio": audio}, features=features)
>>> ds = ds.with_format("jax")
>>> ds[0]["audio"]["array"]
DeviceArray([-0.059021 , -0.03894043, -0.00735474, ..., 0.0133667 ,
0.01809692, 0.00268555], dtype=float32)
>>> ds[0]["audio"]["sampling_rate"]
DeviceArray(44100, dtype=int32, weak_type=True)
```
## Data loading
JAX doesn't have any built-in data loading capabilities, so you'll need to use a library such
as [PyTorch](https://pytorch.org/) to load your data using a `DataLoader` or [TensorFlow](https://www.tensorflow.org/)
using a `tf.data.Dataset`. Citing the [JAX documentation](https://jax.readthedocs.io/en/latest/notebooks/Neural_Network_and_Data_Loading.html#data-loading-with-pytorch) on this topic:
"JAX is laser-focused on program transformations and accelerator-backed NumPy, so we don’t
include data loading or munging in the JAX library. There are already a lot of great data loaders
out there, so let’s just use them instead of reinventing anything. We’ll grab PyTorch’s data loader,
and make a tiny shim to make it work with NumPy arrays.".
That's why the JAX formatting in `datasets` is so useful: it lets you use
any model from the Hugging Face Hub with JAX without having to worry
about the data loading part.
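Here is one possible minimal shim of the kind the JAX docs describe, assuming PyTorch is installed: format the dataset as NumPy and stack the examples yourself in a `collate_fn` (a sketch, not the only way to do it):
```py
>>> import numpy as np
>>> from torch.utils.data import DataLoader
>>> ds = load_dataset("mnist", split="train").with_format("numpy")
>>> def numpy_collate(batch):
...     return {key: np.stack([example[key] for example in batch]) for key in batch[0]}
>>> dataloader = DataLoader(ds, batch_size=32, collate_fn=numpy_collate)
>>> for batch in dataloader:
...     x, y = batch["image"], batch["label"]  # NumPy arrays, ready to be handed to a JAX function
...     break
```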
### Using `with_format('jax')`
The easiest way to get JAX arrays out of a dataset is to use the `with_format('jax')` method. Let's assume
that we want to train a neural network on the [MNIST dataset](http://yann.lecun.com/exdb/mnist/) available
at the HuggingFace Hub at https://huggingface.co/datasets/mnist.
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("mnist")
>>> ds = ds.with_format("jax")
>>> ds["train"][0]
{'image': DeviceArray([[ 0, 0, 0, ...],
[ 0, 0, 0, ...],
...,
[ 0, 0, 0, ...],
[ 0, 0, 0, ...]], dtype=uint8),
'label': DeviceArray(5, dtype=int32)}
```
Once the format is set we can feed the dataset to the JAX model in batches using the `Dataset.iter()`
method:
```py
>>> for epoch in range(epochs):
... for batch in ds["train"].iter(batch_size=32):
... x, y = batch["image"], batch["label"]
... ...
```
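For instance, the body of that loop could flatten and normalize the images before handing them to your own update function (`train_step` and `params` below are placeholders for your model code, not part of `datasets`):
```py
>>> import jax.numpy as jnp
>>> for epoch in range(epochs):
...     for batch in ds["train"].iter(batch_size=32):
...         x = jnp.asarray(batch["image"], dtype=jnp.float32).reshape(-1, 28 * 28) / 255.0
...         y = batch["label"]
...         params, loss = train_step(params, x, y)  # hypothetical training step
```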
| datasets/docs/source/use_with_jax.mdx/0 | {
"file_path": "datasets/docs/source/use_with_jax.mdx",
"repo_id": "datasets",
"token_count": 2646
} | 182 |
# Copyright 2021 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chrf(++) metric as available in sacrebleu."""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well, which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The lengths of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
    word_order (int): Word n-gram order. If set to `2`, the metric is referred to as chrF++. Defaults to `0`.
    beta (int): Determines the importance of recall w.r.t. precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
    'word_order' (int): The word n-gram order. If equal to 2, the metric is referred to as chrF++,
    'beta' (int): Determines the importance of recall w.r.t. precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
def _info(self):
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
'You can install it with `pip install "sacrebleu>=1.4.12"`.'
)
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}
),
codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
reference_urls=[
"https://github.com/m-popovic/chrF",
],
)
def _compute(
self,
predictions,
references,
char_order: int = CHRF.CHAR_ORDER,
word_order: int = CHRF.WORD_ORDER,
beta: int = CHRF.BETA,
lowercase: bool = False,
whitespace: bool = False,
eps_smoothing: bool = False,
):
references_per_prediction = len(references[0])
if any(len(refs) != references_per_prediction for refs in references):
raise ValueError("Sacrebleu requires the same number of references for each prediction")
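        # sacrebleu's corpus_score expects references grouped by position (the i-th reference of every
        # prediction together), so transpose the per-prediction reference lists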
transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| datasets/metrics/chrf/chrf.py/0 | {
"file_path": "datasets/metrics/chrf/chrf.py",
"repo_id": "datasets",
"token_count": 3170
} | 183 |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""F1 metric."""
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
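                # The "multilabel" config expects a list of labels per example; every other config expects a single int.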
{
"predictions": datasets.Sequence(datasets.Value("int32")),
"references": datasets.Sequence(datasets.Value("int32")),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32"),
"references": datasets.Value("int32"),
}
),
reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
)
def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
score = f1_score(
references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
)
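        # f1_score returns a single value when an average is applied and an array of per-class scores when average=None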
return {"f1": float(score) if score.size == 1 else score}
| datasets/metrics/f1/f1.py/0 | {
"file_path": "datasets/metrics/f1/f1.py",
"repo_id": "datasets",
"token_count": 2364
} | 184 |
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAUVE metric from https://github.com/krishnap25/mauve."""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/krishnap25/mauve",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}
),
codebase_urls=["https://github.com/krishnap25/mauve"],
reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
],
)
def _compute(
self,
predictions,
references,
p_features=None,
q_features=None,
p_tokens=None,
q_tokens=None,
num_buckets="auto",
pca_max_data=-1,
kmeans_explained_var=0.9,
kmeans_num_redo=5,
kmeans_max_iter=500,
featurize_model_name="gpt2-large",
device_id=-1,
max_text_length=1024,
divergence_curve_discretization_size=25,
mauve_scaling_factor=5,
verbose=True,
seed=25,
):
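        # All the heavy lifting is delegated to the reference implementation from the `mauve-text` package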
out = compute_mauve(
p_text=predictions,
q_text=references,
p_features=p_features,
q_features=q_features,
p_tokens=p_tokens,
q_tokens=q_tokens,
num_buckets=num_buckets,
pca_max_data=pca_max_data,
kmeans_explained_var=kmeans_explained_var,
kmeans_num_redo=kmeans_num_redo,
kmeans_max_iter=kmeans_max_iter,
featurize_model_name=featurize_model_name,
device_id=device_id,
max_text_length=max_text_length,
divergence_curve_discretization_size=divergence_curve_discretization_size,
mauve_scaling_factor=mauve_scaling_factor,
verbose=verbose,
seed=seed,
)
return out
| datasets/metrics/mauve/mauve.py/0 | {
"file_path": "datasets/metrics/mauve/mauve.py",
"repo_id": "datasets",
"token_count": 2588
} | 185 |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ROC AUC metric."""
from sklearn.metrics import roc_auc_score
import datasets
_DESCRIPTION = """
This metric computes the area under the curve (AUC) for the Receiver Operating Characteristic Curve (ROC). The return values represent how well the model used is predicting the correct classes, based on the input data. A score of `0.5` means that the model is predicting exactly at chance, i.e. the model's predictions are correct at the same rate as if the predictions were being decided by the flip of a fair coin or the roll of a fair die. A score above `0.5` indicates that the model is doing better than chance, while a score below `0.5` indicates that the model is doing worse than chance.
This metric has three separate use cases:
- binary: The case in which there are only two different label classes, and each example gets only one label. This is the default implementation.
- multiclass: The case in which there can be more than two different label classes, but each example still gets only one label.
- multilabel: The case in which there can be more than two different label classes, and each example can have more than one label.
"""
_KWARGS_DESCRIPTION = """
Args:
- references (array-like of shape (n_samples,) or (n_samples, n_classes)): Ground truth labels. Expects different input based on use case:
- binary: expects an array-like of shape (n_samples,)
- multiclass: expects an array-like of shape (n_samples,)
- multilabel: expects an array-like of shape (n_samples, n_classes)
- prediction_scores (array-like of shape (n_samples,) or (n_samples, n_classes)): Model predictions. Expects different inputs based on use case:
- binary: expects an array-like of shape (n_samples,)
- multiclass: expects an array-like of shape (n_samples, n_classes)
- multilabel: expects an array-like of shape (n_samples, n_classes)
- average (`str`): Type of average, and is ignored in the binary use case. Defaults to 'macro'. Options are:
- `'micro'`: Calculates metrics globally by considering each element of the label indicator matrix as a label. Only works with the multilabel use case.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average, weighted by support (i.e. the number of true instances for each label).
- `'samples'`: Calculate metrics for each instance, and find their average. Only works with the multilabel use case.
        - `None`: No average is calculated, and scores for each class are returned. Only works with the multilabel use case.
- sample_weight (array-like of shape (n_samples,)): Sample weights. Defaults to None.
- max_fpr (`float`): If not None, the standardized partial AUC over the range [0, `max_fpr`] is returned. Must be greater than `0` and less than or equal to `1`. Defaults to `None`. Note: For the multiclass use case, `max_fpr` should be either `None` or `1.0` as ROC AUC partial computation is not currently supported for `multiclass`.
- multi_class (`str`): Only used for multiclass targets, where it is required. Determines the type of configuration to use. Options are:
- `'ovr'`: Stands for One-vs-rest. Computes the AUC of each class against the rest. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when `average == 'macro'`, because class imbalance affects the composition of each of the 'rest' groupings.
- `'ovo'`: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes. Insensitive to class imbalance when `average == 'macro'`.
- labels (array-like of shape (n_classes,)): Only used for multiclass targets. List of labels that index the classes in
`prediction_scores`. If `None`, the numerical or lexicographical order of the labels in
`prediction_scores` is used. Defaults to `None`.
Returns:
roc_auc (`float` or array-like of shape (n_classes,)): Returns array if in multilabel use case and `average='None'`. Otherwise, returns `float`.
Examples:
Example 1:
>>> roc_auc_score = datasets.load_metric("roc_auc")
>>> refs = [1, 0, 1, 1, 0, 0]
>>> pred_scores = [0.5, 0.2, 0.99, 0.3, 0.1, 0.7]
>>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores)
>>> print(round(results['roc_auc'], 2))
0.78
Example 2:
>>> roc_auc_score = datasets.load_metric("roc_auc", "multiclass")
>>> refs = [1, 0, 1, 2, 2, 0]
>>> pred_scores = [[0.3, 0.5, 0.2],
... [0.7, 0.2, 0.1],
... [0.005, 0.99, 0.005],
... [0.2, 0.3, 0.5],
... [0.1, 0.1, 0.8],
... [0.1, 0.7, 0.2]]
>>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores, multi_class='ovr')
>>> print(round(results['roc_auc'], 2))
0.85
Example 3:
>>> roc_auc_score = datasets.load_metric("roc_auc", "multilabel")
>>> refs = [[1, 1, 0],
... [1, 1, 0],
... [0, 1, 0],
... [0, 0, 1],
... [0, 1, 1],
... [1, 0, 1]]
>>> pred_scores = [[0.3, 0.5, 0.2],
... [0.7, 0.2, 0.1],
... [0.005, 0.99, 0.005],
... [0.2, 0.3, 0.5],
... [0.1, 0.1, 0.8],
... [0.1, 0.7, 0.2]]
>>> results = roc_auc_score.compute(references=refs, prediction_scores=pred_scores, average=None)
>>> print([round(res, 2) for res in results['roc_auc']])
[0.83, 0.38, 0.94]
"""
_CITATION = """\
@article{doi:10.1177/0272989X8900900307,
author = {Donna Katzman McClish},
title ={Analyzing a Portion of the ROC Curve},
journal = {Medical Decision Making},
volume = {9},
number = {3},
pages = {190-195},
year = {1989},
doi = {10.1177/0272989X8900900307},
note ={PMID: 2668680},
URL = {https://doi.org/10.1177/0272989X8900900307},
eprint = {https://doi.org/10.1177/0272989X8900900307}
}
@article{10.1023/A:1010920819831,
author = {Hand, David J. and Till, Robert J.},
title = {A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems},
year = {2001},
issue_date = {November 2001},
publisher = {Kluwer Academic Publishers},
address = {USA},
volume = {45},
number = {2},
issn = {0885-6125},
url = {https://doi.org/10.1023/A:1010920819831},
doi = {10.1023/A:1010920819831},
journal = {Mach. Learn.},
month = {oct},
pages = {171–186},
numpages = {16},
keywords = {Gini index, AUC, error rate, ROC curve, receiver operating characteristic}
}
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ROCAUC(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
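                # The expected input schema depends on the config: "multiclass" pairs an int reference with a list
                # of per-class scores, "multilabel" uses lists for both fields, and the default (binary) case uses scalars.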
{
"prediction_scores": datasets.Sequence(datasets.Value("float")),
"references": datasets.Value("int32"),
}
if self.config_name == "multiclass"
else {
"references": datasets.Sequence(datasets.Value("int32")),
"prediction_scores": datasets.Sequence(datasets.Value("float")),
}
if self.config_name == "multilabel"
else {
"references": datasets.Value("int32"),
"prediction_scores": datasets.Value("float"),
}
),
reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html"],
)
def _compute(
self,
references,
prediction_scores,
average="macro",
sample_weight=None,
max_fpr=None,
multi_class="raise",
labels=None,
):
return {
"roc_auc": roc_auc_score(
references,
prediction_scores,
average=average,
sample_weight=sample_weight,
max_fpr=max_fpr,
multi_class=multi_class,
labels=labels,
)
}
| datasets/metrics/roc_auc/roc_auc.py/0 | {
"file_path": "datasets/metrics/roc_auc/roc_auc.py",
"repo_id": "datasets",
"token_count": 3792
} | 186 |
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQuAD v2 metric."""
import datasets
from .evaluate import (
apply_no_ans_threshold,
find_all_best_thresh,
get_raw_scores,
make_eval_dict,
make_qid_to_has_ans,
merge_eval,
)
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 2 of the Stanford Question
Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions
written adversarially by crowdworkers to look similar to answerable ones.
To do well on SQuAD2.0, systems must not only answer questions when possible, but also
determine when no answer is supported by the paragraph and abstain from answering.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD v2 scores (F1 and EM).
Args:
    predictions: List of triples for question-answers to score, each with the following elements:
- the question-answer 'id' field as given in the references (see below)
- the text of the answer
- the probability that the question has no answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a list of Dict {'text': text of the answer as a string}
no_answer_threshold: float
Probability threshold to decide that a question has no answer.
Returns:
    'exact': Exact match (the normalized answer exactly matches the gold answer)
    'f1': The F-score of predicted tokens versus the gold answer
    'total': Number of scores considered
    'HasAns_exact': Exact match (the normalized answer exactly matches the gold answer)
    'HasAns_f1': The F-score of predicted tokens versus the gold answer
    'HasAns_total': Number of scores considered
    'NoAns_exact': Exact match (the normalized answer exactly matches the gold answer)
    'NoAns_f1': The F-score of predicted tokens versus the gold answer
    'NoAns_total': Number of scores considered
'best_exact': Best exact match (with varying threshold)
'best_exact_thresh': No-answer probability threshold associated to the best exact match
'best_f1': Best F1 (with varying threshold)
'best_f1_thresh': No-answer probability threshold associated to the best F1
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_v2_metric = datasets.load_metric("squad_v2")
>>> results = squad_v2_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact': 100.0, 'f1': 100.0, 'total': 1, 'HasAns_exact': 100.0, 'HasAns_f1': 100.0, 'HasAns_total': 1, 'best_exact': 100.0, 'best_exact_thresh': 0.0, 'best_f1': 100.0, 'best_f1_thresh': 0.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SquadV2(datasets.Metric):
def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string"),
"prediction_text": datasets.Value("string"),
"no_answer_probability": datasets.Value("float32"),
},
"references": {
"id": datasets.Value("string"),
"answers": datasets.features.Sequence(
{"text": datasets.Value("string"), "answer_start": datasets.Value("int32")}
),
},
}
),
codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"],
)
def _compute(self, predictions, references, no_answer_threshold=1.0):
no_answer_probabilities = {p["id"]: p["no_answer_probability"] for p in predictions}
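        # Re-wrap the flat list of references into the nested article/paragraph structure the official evaluation script expects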
dataset = [{"paragraphs": [{"qas": references}]}]
predictions = {p["id"]: p["prediction_text"] for p in predictions}
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, predictions)
exact_thresh = apply_no_ans_threshold(exact_raw, no_answer_probabilities, qid_to_has_ans, no_answer_threshold)
f1_thresh = apply_no_ans_threshold(f1_raw, no_answer_probabilities, qid_to_has_ans, no_answer_threshold)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, "HasAns")
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, "NoAns")
find_all_best_thresh(out_eval, predictions, exact_raw, f1_raw, no_answer_probabilities, qid_to_has_ans)
return dict(out_eval)
| datasets/metrics/squad_v2/squad_v2.py/0 | {
"file_path": "datasets/metrics/squad_v2/squad_v2.py",
"repo_id": "datasets",
"token_count": 2564
} | 187 |
[tool.ruff]
line-length = 119
[tool.ruff.lint]
# Ignored rules:
# "E501" -> line length violation
# "F821" -> undefined named in type annotation (e.g. Literal["something"])
# "C901" -> `function_name` is too complex
ignore = ["E501", "F821", "C901"]
select = ["C", "E", "F", "I", "W"]
[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["datasets"]
[tool.pytest.ini_options]
# Test fails if a FutureWarning is thrown by `huggingface_hub`
filterwarnings = [
"error::FutureWarning:huggingface_hub*",
]
markers = [
"unit: unit test",
"integration: integration test",
]
| datasets/pyproject.toml/0 | {
"file_path": "datasets/pyproject.toml",
"repo_id": "datasets",
"token_count": 236
} | 188 |
import logging
import os
from argparse import ArgumentParser
from pathlib import Path
from shutil import copyfile, rmtree
from typing import Generator
import datasets.config
from datasets.builder import DatasetBuilder
from datasets.commands import BaseDatasetsCLICommand
from datasets.download.download_manager import DownloadMode
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.info_utils import VerificationMode
from datasets.utils.logging import ERROR, get_logger
logger = get_logger(__name__)
def _test_command_factory(args):
return TestCommand(
args.dataset,
args.name,
args.cache_dir,
args.data_dir,
args.all_configs,
args.save_info or args.save_infos,
args.ignore_verifications,
args.force_redownload,
args.clear_cache,
args.num_proc,
)
class TestCommand(BaseDatasetsCLICommand):
__test__ = False # to tell pytest it's not a test class
@staticmethod
def register_subcommand(parser: ArgumentParser):
test_parser = parser.add_parser("test", help="Test dataset implementation.")
test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name")
test_parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="Cache directory where the datasets are stored.",
)
test_parser.add_argument(
"--data_dir",
type=str,
default=None,
help="Can be used to specify a manual directory to get the files from.",
)
test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations")
test_parser.add_argument(
"--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)"
)
test_parser.add_argument(
"--ignore_verifications",
action="store_true",
help="Run the test without checksums and splits checks.",
)
test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload")
test_parser.add_argument(
"--clear_cache",
action="store_true",
help="Remove downloaded files and cached datasets after each config test",
)
test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes")
# aliases
test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info")
test_parser.add_argument("dataset", type=str, help="Name of the dataset to download")
test_parser.set_defaults(func=_test_command_factory)
def __init__(
self,
dataset: str,
name: str,
cache_dir: str,
data_dir: str,
all_configs: bool,
save_infos: bool,
ignore_verifications: bool,
force_redownload: bool,
clear_cache: bool,
num_proc: int,
):
self._dataset = dataset
self._name = name
self._cache_dir = cache_dir
self._data_dir = data_dir
self._all_configs = all_configs
self._save_infos = save_infos
self._ignore_verifications = ignore_verifications
self._force_redownload = force_redownload
self._clear_cache = clear_cache
self._num_proc = num_proc
if clear_cache and not cache_dir:
print(
"When --clear_cache is used, specifying a cache directory is mandatory.\n"
"The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n"
"Please provide a --cache_dir that will be used to test the dataset script."
)
exit(1)
if save_infos:
self._ignore_verifications = True
def run(self):
logging.getLogger("filelock").setLevel(ERROR)
if self._name is not None and self._all_configs:
            print("The parameters `config` and `all_configs` can't be used at the same time.")
exit(1)
path, config_name = self._dataset, self._name
module = dataset_module_factory(path)
builder_cls = import_main_class(module.module_path)
n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1
def get_builders() -> Generator[DatasetBuilder, None, None]:
if self._all_configs and builder_cls.BUILDER_CONFIGS:
for i, config in enumerate(builder_cls.BUILDER_CONFIGS):
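                    # The dataset module may already pin a config name in its builder kwargs; in that case, don't pass one from the loop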
if "config_name" in module.builder_kwargs:
yield builder_cls(
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
yield builder_cls(
config_name=config.name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
else:
if "config_name" in module.builder_kwargs:
yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs)
else:
yield builder_cls(
config_name=config_name,
cache_dir=self._cache_dir,
data_dir=self._data_dir,
**module.builder_kwargs,
)
for j, builder in enumerate(get_builders()):
print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})")
builder._record_infos = os.path.exists(
os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME)
) # record checksums only if we need to update a (deprecated) dataset_infos.json
builder.download_and_prepare(
download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS
if not self._force_redownload
else DownloadMode.FORCE_REDOWNLOAD,
verification_mode=VerificationMode.NO_CHECKS
if self._ignore_verifications
else VerificationMode.ALL_CHECKS,
try_from_hf_gcs=False,
num_proc=self._num_proc,
)
builder.as_dataset()
if self._save_infos:
builder._save_infos()
# If save_infos=True, the dataset card (README.md) is created next to the loaded module file.
# The dataset_infos are saved in the YAML part of the README.md
# Let's move it to the original directory of the dataset script, to allow the user to
# upload them on S3 at the same time afterwards.
if self._save_infos:
dataset_readme_path = os.path.join(
builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME
)
name = Path(path).name + ".py"
combined_path = os.path.join(path, name)
if os.path.isfile(path):
dataset_dir = os.path.dirname(path)
elif os.path.isfile(combined_path):
dataset_dir = path
elif os.path.isdir(path): # for local directories containing only data files
dataset_dir = path
else: # in case of a remote dataset
dataset_dir = None
print(f"Dataset card saved at {dataset_readme_path}")
# Move dataset_info back to the user
if dataset_dir is not None:
user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME)
copyfile(dataset_readme_path, user_dataset_readme_path)
print(f"Dataset card saved at {user_dataset_readme_path}")
# If clear_cache=True, the download folder and the dataset builder cache directory are deleted
if self._clear_cache:
if os.path.isdir(builder._cache_dir):
logger.warning(f"Clearing cache at {builder._cache_dir}")
rmtree(builder._cache_dir)
download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR)
if os.path.isdir(download_dir):
logger.warning(f"Clearing cache at {download_dir}")
rmtree(download_dir)
print("Test successful.")
| datasets/src/datasets/commands/test.py/0 | {
"file_path": "datasets/src/datasets/commands/test.py",
"repo_id": "datasets",
"token_count": 4202
} | 189 |