Columns: text (string, lengths 7 to 318k), id (string, lengths 14 to 166), metadata (dict), __index_level_0__ (int64, 0 to 439)
# Model arguments
model_name_or_path: mistralai/Mistral-7B-v0.1
model_revision: main
torch_dtype: bfloat16
use_flash_attention_2: true

# Data training arguments
chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
dataset_mixer:
  HuggingFaceH4/ultrachat_200k: 1.0
dataset_splits:
- train_sft
- test_sft
preprocessing_num_workers: 12

# SFT trainer config
bf16: true
do_eval: true
evaluation_strategy: epoch
gradient_accumulation_steps: 1
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: False
hub_model_id: zephyr-7b-sft-full
hub_strategy: every_save
learning_rate: 2.0e-05
log_level: info
logging_steps: 5
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 2048
max_steps: -1
num_train_epochs: 1
output_dir: data/zephyr-7b-sft-full
overwrite_output_dir: true
per_device_eval_batch_size: 8
per_device_train_batch_size: 16
push_to_hub: true
remove_unused_columns: true
report_to:
- tensorboard
save_strategy: "steps"
save_steps: 100
save_total_limit: 1
seed: 42
warmup_ratio: 0.1
alignment-handbook/recipes/zephyr-7b-beta/sft/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/zephyr-7b-beta/sft/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 568 }
12
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from copy import deepcopy import pytest from datasets import Dataset from transformers import AutoTokenizer from alignment import DataArguments, ModelArguments, apply_chat_template, get_datasets, get_tokenizer from alignment.data import maybe_insert_system_message class GetDatasetsTest(unittest.TestCase): """Each of these test datasets has 100 examples""" def test_loading_data_args(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.5, "HuggingFaceH4/testing_self_instruct_small": 0.3, "HuggingFaceH4/testing_codealpaca_small": 0.2, } data_args = DataArguments(dataset_mixer=dataset_mixer) datasets = get_datasets(data_args) self.assertEqual(len(datasets["train"]), 100) self.assertEqual(len(datasets["test"]), 300) def test_loading_data_dict(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.5, "HuggingFaceH4/testing_self_instruct_small": 0.3, "HuggingFaceH4/testing_codealpaca_small": 0.2, } datasets = get_datasets(dataset_mixer) self.assertEqual(len(datasets["train"]), 100) self.assertEqual(len(datasets["test"]), 300) def test_loading_with_unit_fractions(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 1.0, "HuggingFaceH4/testing_self_instruct_small": 1.0, "HuggingFaceH4/testing_codealpaca_small": 1.0, } datasets = get_datasets(dataset_mixer) self.assertEqual(len(datasets["train"]), 300) self.assertEqual(len(datasets["test"]), 300) def test_loading_with_fractions_greater_than_unity(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.7, "HuggingFaceH4/testing_self_instruct_small": 0.4, } datasets = get_datasets(dataset_mixer) self.assertEqual(len(datasets["train"]), 70 + 40) self.assertEqual(len(datasets["test"]), 200) def test_loading_fails_with_negative_fractions(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 0.7, "HuggingFaceH4/testing_self_instruct_small": -0.3, } with pytest.raises(ValueError, match=r"Dataset fractions cannot be negative."): get_datasets(dataset_mixer) def test_loading_single_split_with_unit_fractions(self): dataset_mixer = { "HuggingFaceH4/testing_alpaca_small": 1.0, } datasets = get_datasets(dataset_mixer, splits=["test"]) self.assertEqual(len(datasets["test"]), 100) self.assertRaises(KeyError, lambda: datasets["train"]) class ApplyChatTemplateTest(unittest.TestCase): def setUp(self): model_args = ModelArguments(model_name_or_path="HuggingFaceH4/zephyr-7b-alpha") data_args = DataArguments() self.tokenizer = get_tokenizer(model_args, data_args) self.dataset = Dataset.from_dict( { "prompt": ["Hello!"], "messages": [ [ {"role": "system", "content": "You are a happy chatbot"}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Bonjour!"}, {"role": "user", "content": "How are you?"}, {"role": "assistant", "content": "I am doing well, thanks!"}, ] ], "chosen": [ [ {"role": "system", "content": "You are a happy chatbot"}, {"role": "user", "content": "Hello!"}, 
{"role": "assistant", "content": "Bonjour!"}, {"role": "user", "content": "How are you?"}, {"role": "assistant", "content": "I am doing well, thanks!"}, ] ], "rejected": [ [ {"role": "system", "content": "You are a happy chatbot"}, {"role": "user", "content": "Hello!"}, {"role": "assistant", "content": "Bonjour!"}, {"role": "user", "content": "How are you?"}, {"role": "assistant", "content": "Not so good tbh"}, ] ], } ) def test_maybe_insert_system_message(self): # does not accept system prompt mistral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2") # accepts system prompt. use codellama since it has no HF token reqiurement llama_tokenizer = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-hf") messages_sys_excl = [{"role": "user", "content": "Tell me a joke."}] messages_sys_incl = [{"role": "system", "content": ""}, {"role": "user", "content": "Tell me a joke."}] mistral_messages = deepcopy(messages_sys_excl) llama_messages = deepcopy(messages_sys_excl) maybe_insert_system_message(mistral_messages, mistral_tokenizer) maybe_insert_system_message(llama_messages, llama_tokenizer) # output from mistral should not have a system message, output from llama should self.assertEqual(mistral_messages, messages_sys_excl) self.assertEqual(llama_messages, messages_sys_incl) def test_sft(self): dataset = self.dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "sft"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\nI am doing well, thanks!</s>\n" }, ) def test_generation(self): # Remove last turn from messages dataset = self.dataset.map(lambda x: {"messages": x["messages"][:-1]}) dataset = dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "generation"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\n" }, ) def test_rm(self): dataset = self.dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "rm"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text_chosen": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\nI am doing well, thanks!</s>\n", "text_rejected": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n<|assistant|>\nNot so good tbh</s>\n", }, ) def test_dpo(self): dataset = self.dataset.map( apply_chat_template, fn_kwargs={"tokenizer": self.tokenizer, "task": "dpo"}, remove_columns=self.dataset.column_names, ) self.assertDictEqual( dataset[0], { "text_prompt": "<|system|>\nYou are a happy chatbot</s>\n<|user|>\nHello!</s>\n<|assistant|>\nBonjour!</s>\n<|user|>\nHow are you?</s>\n", "text_chosen": "<|assistant|>\nI am doing well, thanks!</s>\n", "text_rejected": "<|assistant|>\nNot so good tbh</s>\n", }, )
alignment-handbook/tests/test_data.py/0
{ "file_path": "alignment-handbook/tests/test_data.py", "repo_id": "alignment-handbook", "token_count": 4201 }
13
# Introduction

{{#include ../../README.md:features}}

This book will introduce, step by step, how to use `candle`.
candle/candle-book/src/README.md/0
{ "file_path": "candle/candle-book/src/README.md", "repo_id": "candle", "token_count": 34 }
14
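The row above is the introduction page of the candle book. As a quick illustration of the kind of usage the book walks through, here is a minimal sketch of building two tensors on the CPU and multiplying them with `candle_core`; the crate and method names match the candle API visible in the benchmark and test rows further down, but the snippet itself is my own example, not part of the book.

use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    // Build two 2x2 f32 tensors on the CPU device and matrix-multiply them.
    let device = Device::Cpu;
    let a = Tensor::new(&[[1f32, 2.], [3., 4.]], &device)?;
    let b = Tensor::new(&[[5f32, 6.], [7., 8.]], &device)?;
    let c = a.matmul(&b)?;
    println!("{:?}", c.to_vec2::<f32>()?); // [[19.0, 22.0], [43.0, 50.0]]
    Ok(())
}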
# Porting a custom kernel
candle/candle-book/src/inference/cuda/porting.md/0
{ "file_path": "candle/candle-book/src/inference/cuda/porting.md", "repo_id": "candle", "token_count": 7 }
15
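The chapter above ("Porting a custom kernel") is only a title here. For context, candle exposes custom kernels through the `CustomOp1`/`CustomOp2`/`CustomOp3` traits that also appear in the storage.rs row further down. The sketch below is a hypothetical, CPU-only `CustomOp1` that doubles a contiguous f32 tensor; it illustrates the shape of the trait, not the book's actual example, and it assumes a contiguous input layout.

use candle_core::{CpuStorage, CustomOp1, Device, Layout, Result, Shape, Tensor};

// Hypothetical op: double every element. Only the CPU path is implemented;
// the CUDA/Metal methods keep their default "not implemented" behaviour.
struct Double;

impl CustomOp1 for Double {
    fn name(&self) -> &'static str {
        "double"
    }

    fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
        // Assumes a contiguous f32 tensor, for brevity.
        let data = storage.as_slice::<f32>()?;
        let out: Vec<f32> = data.iter().map(|v| v * 2.0).collect();
        Ok((CpuStorage::F32(out), layout.shape().clone()))
    }
}

fn main() -> Result<()> {
    let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    let doubled = t.apply_op1(Double)?;
    println!("{:?}", doubled.to_vec1::<f32>()?); // [2.0, 4.0, 6.0]
    Ok(())
}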
use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{black_box, criterion_group, Criterion, Throughput};
use std::time::Instant;

fn run(a: &Tensor, b: &Tensor) {
    a.matmul(&b.t().unwrap()).unwrap();
}

fn run_bench(c: &mut Criterion, device: &Device) {
    let b = 1;
    let m = 1;
    let n = 2048;
    let k = 2048;

    let dtype = DType::F32;
    let lhs = Tensor::zeros((b, m, k), dtype, device).unwrap();
    let rhs = Tensor::zeros((b, n, k), dtype, device).unwrap();

    let flops = b * m * n * k;

    let mut group = c.benchmark_group(device.bench_name("matmul"));
    group.throughput(Throughput::Bytes(flops as u64));
    group.bench_function("iter", move |b| {
        b.iter_custom(|iters| {
            let start = Instant::now();
            for _i in 0..iters {
                run(black_box(&lhs), black_box(&rhs));
            }
            device.sync().unwrap();
            start.elapsed()
        })
    });
    group.finish();
}

fn criterion_benchmark(c: &mut Criterion) {
    let handler = BenchDeviceHandler::new().unwrap();
    for device in handler.devices {
        run_bench(c, &device);
    }
}

criterion_group!(benches, criterion_benchmark);
candle/candle-core/benches/benchmarks/matmul.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/matmul.rs", "repo_id": "candle", "token_count": 551 }
16
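The benchmark above depends on the crate-internal `BenchDevice`/`BenchDeviceHandler` helpers, so it only builds inside candle-core. A rough standalone equivalent, assuming only the public `candle_core` and `criterion` APIs, could look like the sketch below (shapes mirror the b = 1, m = 1, n = k = 2048 case); like any criterion benchmark, it is run with `cargo bench`.

use candle_core::{DType, Device, Tensor};
use criterion::{black_box, criterion_group, criterion_main, Criterion};

fn cpu_matmul(c: &mut Criterion) {
    let device = Device::Cpu;
    // (b, m, k) x (b, k, n) with b = 1, m = 1, n = k = 2048.
    let lhs = Tensor::zeros((1, 1, 2048), DType::F32, &device).unwrap();
    let rhs = Tensor::zeros((1, 2048, 2048), DType::F32, &device).unwrap();
    c.bench_function("cpu_matmul_1x2048x2048", |b| {
        b.iter(|| black_box(&lhs).matmul(black_box(&rhs)).unwrap())
    });
}

criterion_group!(benches, cpu_matmul);
criterion_main!(benches);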
pub mod erf; pub mod kernels; trait Cpu<const ARR: usize> { type Unit; type Array; const STEP: usize; const EPR: usize; fn n() -> usize; unsafe fn zero() -> Self::Unit; unsafe fn zero_array() -> Self::Array; unsafe fn load(mem_addr: *const f32) -> Self::Unit; unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit; unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit; unsafe fn vec_reduce(x: Self::Array, y: *mut f32); unsafe fn from_f32(v: f32) -> Self::Unit; unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit); } trait CpuF16<const ARR: usize> { type Unit; type Array; const STEP: usize; const EPR: usize; fn n() -> usize; unsafe fn zero() -> Self::Unit; unsafe fn zero_array() -> Self::Array; unsafe fn load(mem_addr: *const f16) -> Self::Unit; unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit; unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit; unsafe fn vec_reduce(x: Self::Array, y: *mut f32); unsafe fn from_f32(v: f32) -> Self::Unit; unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit); } use half::f16; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[cfg(target_feature = "avx")] pub mod avx; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[cfg(target_feature = "avx")] pub use avx::{CurrentCpu, CurrentCpuF16}; #[cfg(target_arch = "wasm32")] #[cfg(target_feature = "simd128")] pub mod simd128; #[cfg(target_arch = "wasm32")] #[cfg(target_feature = "simd128")] pub use simd128::CurrentCpu; #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] #[cfg(target_feature = "neon")] pub mod neon; #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] #[cfg(target_feature = "neon")] pub use neon::CurrentCpu; #[cfg(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" ))] #[inline(always)] pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) { let np = k & !(CurrentCpu::STEP - 1); let mut sum = CurrentCpu::zero_array(); let mut ax = CurrentCpu::zero_array(); let mut ay = CurrentCpu::zero_array(); for i in (0..np).step_by(CurrentCpu::STEP) { for j in 0..CurrentCpu::n() { ax[j] = CurrentCpu::load(a_row.add(i + j * CurrentCpu::EPR)); ay[j] = CurrentCpu::load(b_row.add(i + j * CurrentCpu::EPR)); sum[j] = CurrentCpu::vec_fma(sum[j], ax[j], ay[j]); } } CurrentCpu::vec_reduce(sum, c); // leftovers for i in np..k { *c += *a_row.add(i) * (*b_row.add(i)); } } #[cfg(not(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" )))] #[inline(always)] pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) { // leftovers for i in 0..k { *c += *a_row.add(i) * (*b_row.add(i)); } } #[cfg(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" ))] #[inline(always)] pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) { let np = k & !(CurrentCpu::STEP - 1); let mut sum = CurrentCpu::zero_array(); let mut x = CurrentCpu::zero_array(); for i in (0..np).step_by(CurrentCpu::STEP) { for j in 0..CurrentCpu::n() { x[j] = CurrentCpu::load(row.add(i + j * CurrentCpu::EPR)); sum[j] = CurrentCpu::vec_add(sum[j], x[j]); } } CurrentCpu::vec_reduce(sum, b); // leftovers for i in np..k { *b += *row.add(i) } } #[cfg(not(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" )))] #[inline(always)] pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) { *b = 0f32; for i in 0..k { *b += *row.add(i) } } #[cfg(target_feature = "avx")] 
#[inline(always)] pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) { let mut sumf = 0.0f32; let np = k & !(CurrentCpuF16::STEP - 1); let mut sum = CurrentCpuF16::zero_array(); let mut ax = CurrentCpuF16::zero_array(); let mut ay = CurrentCpuF16::zero_array(); for i in (0..np).step_by(CurrentCpuF16::STEP) { for j in 0..CurrentCpuF16::n() { ax[j] = CurrentCpuF16::load(a_row.add(i + j * CurrentCpuF16::EPR)); ay[j] = CurrentCpuF16::load(b_row.add(i + j * CurrentCpuF16::EPR)); sum[j] = CurrentCpuF16::vec_fma(sum[j], ax[j], ay[j]); } } CurrentCpuF16::vec_reduce(sum, &mut sumf); // leftovers for i in np..k { sumf += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32(); } *c = sumf; } #[cfg(not(target_feature = "avx"))] #[inline(always)] pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) { // leftovers let mut sum = 0.0; for i in 0..k { sum += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32(); } *c = sum; }
candle/candle-core/src/cpu/mod.rs/0
{ "file_path": "candle/candle-core/src/cpu/mod.rs", "repo_id": "candle", "token_count": 2416 }
17
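The SIMD `vec_dot_f32` above processes the input in `STEP`-sized blocks (loading `EPR` lanes per register), reduces the partial sums, and then finishes the remaining "leftover" elements with a plain scalar loop. As a point of comparison, the sketch below is my own safe, scalar reference of the same reduction; it matches what the scalar fallback path computes and can be used to cross-check the SIMD paths.

// Safe scalar reference for the dot product computed by `vec_dot_f32`.
fn scalar_dot_f32(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    a.iter().zip(b.iter()).map(|(x, y)| x * y).sum()
}

fn main() {
    let a = vec![1.0f32, 2.0, 3.0, 4.0];
    let b = vec![0.5f32; 4];
    // A 4-element input would fall entirely into the "leftovers" loop
    // unless STEP <= 4 on the active SIMD backend.
    println!("dot = {}", scalar_dot_f32(&a, &b)); // 5
}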
#![allow(dead_code)] use libc::{c_char, c_double, c_float, c_int}; mod ffi { use super::*; extern "C" { pub fn vsTanh(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdTanh(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsExp(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdExp(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsLn(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdLn(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsSin(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdSin(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsCos(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdCos(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsSqrt(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdSqrt(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsAdd(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdAdd(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsSub(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdSub(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsMul(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdMul(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsDiv(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdDiv(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsFmax(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdFmax(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsFmin(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdFmin(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn sgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_float, a: *const c_float, lda: *const c_int, b: *const c_float, ldb: *const c_int, beta: *const c_float, c: *mut c_float, ldc: *const c_int, ); pub fn dgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_double, a: *const c_double, lda: *const c_int, b: *const c_double, ldb: *const c_int, beta: *const c_double, c: *mut c_double, ldc: *const c_int, ); pub fn hgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const half::f16, a: *const half::f16, lda: *const c_int, b: *const half::f16, ldb: *const c_int, beta: *const half::f16, c: *mut half::f16, ldc: *const c_int, ); } } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn sgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f32, a: &[f32], lda: i32, b: &[f32], ldb: i32, beta: f32, c: &mut [f32], ldc: i32, ) { ffi::sgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn dgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f64, a: &[f64], lda: i32, b: &[f64], ldb: i32, beta: f64, c: &mut [f64], ldc: i32, ) { ffi::dgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn hgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: half::f16, a: &[half::f16], lda: i32, b: &[half::f16], ldb: i32, beta: 
half::f16, c: &mut [half::f16], ldc: i32, ) { ffi::hgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[inline] pub fn vs_exp(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_exp(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_ln(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_ln(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sin(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sin(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_cos(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_cos(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sqrt(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sqrt(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sqr(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sqr(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_tanh(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_tanh(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { 
ffi::vdTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } // The vector functions from mkl can be performed in place by using the same array for input and // output. // https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/vector-mathematical-functions.html #[inline] pub fn vs_tanh_inplace(y: &mut [f32]) { unsafe { ffi::vsTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_tanh_inplace(y: &mut [f64]) { unsafe { ffi::vdTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vs_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } #[inline] pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vd_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } macro_rules! binary_op { ($fn_name:ident, $ty:ty, $mkl_name:ident) => { #[inline] pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) { let a_len = a.len(); let b_len = b.len(); let y_len = y.len(); if a_len != y_len || b_len != y_len { panic!( "{} a,b,y len mismatch {a_len} {b_len} {y_len}", stringify!($fn_name) ); } unsafe { ffi::$mkl_name(a_len as i32, a.as_ptr(), b.as_ptr(), y.as_mut_ptr()) } } }; } binary_op!(vs_add, f32, vsAdd); binary_op!(vd_add, f64, vdAdd); binary_op!(vs_sub, f32, vsSub); binary_op!(vd_sub, f64, vdSub); binary_op!(vs_mul, f32, vsMul); binary_op!(vd_mul, f64, vdMul); binary_op!(vs_div, f32, vsDiv); binary_op!(vd_div, f64, vdDiv); binary_op!(vs_max, f32, vsFmax); binary_op!(vd_max, f64, vdFmax); binary_op!(vs_min, f32, vsFmin); binary_op!(vd_min, f64, vdFmin);
candle/candle-core/src/mkl.rs/0
{ "file_path": "candle/candle-core/src/mkl.rs", "repo_id": "candle", "token_count": 6060 }
18
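`vs_gelu`/`vd_gelu` above implement the tanh approximation of GELU by first writing sqrt(2/pi) * v * (1 + 0.044715 * v^2) into the output buffer, applying MKL's in-place vectorized tanh, and then finishing with 0.5 * v * (1 + tanh(...)). The scalar sketch below (my own, not part of the file) spells out the same formula element by element, which is handy as a reference when checking the vectorized version.

// Scalar reference for the tanh-approximation GELU used by vs_gelu/vd_gelu:
//   gelu(v) ~= 0.5 * v * (1 + tanh(sqrt(2/pi) * v * (1 + 0.044715 * v^2)))
fn gelu_tanh(v: f32) -> f32 {
    let inner = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v);
    0.5 * v * (1.0 + inner.tanh())
}

fn main() {
    for v in [-1.0f32, 0.0, 1.0, 2.0] {
        println!("gelu({v}) = {}", gelu_tanh(v));
    }
}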
use crate::backend::BackendStorage; use crate::op::{self, CmpOp, CustomOp1, CustomOp2, CustomOp3, ReduceOp}; use crate::{CpuStorage, CudaStorage, DType, Device, Error, Layout, MetalStorage, Result, Shape}; // We do not want to implement Clone on Storage as cloning may fail because of // out of memory. Instead try_clone should be used. #[derive(Debug)] pub enum Storage { Cpu(CpuStorage), Cuda(CudaStorage), Metal(MetalStorage), } impl Storage { pub fn try_clone(&self, layout: &Layout) -> Result<Self> { match self { Self::Cpu(storage) => Ok(Self::Cpu(storage.clone())), Self::Cuda(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Metal(storage)) } } } pub fn device(&self) -> Device { match self { Self::Cpu(_) => Device::Cpu, Self::Cuda(storage) => Device::Cuda(storage.device().clone()), Self::Metal(storage) => Device::Metal(storage.device().clone()), } } pub fn dtype(&self) -> DType { match self { Self::Cpu(storage) => storage.dtype(), Self::Cuda(storage) => storage.dtype(), Self::Metal(storage) => storage.dtype(), } } pub(crate) fn same_device(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs = self.device().location(); let rhs = rhs.device().location(); if lhs != rhs { Err(Error::DeviceMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn same_dtype(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs = self.dtype(); let rhs = rhs.dtype(); if lhs != rhs { Err(Error::DTypeMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Metal(storage)) } } } pub(crate) fn powf(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn cmp( &self, op: CmpOp, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "cmp")?; self.same_dtype(rhs, "cmp")?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. 
Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "cmp", } .bt()) } } } pub(crate) fn reduce_op(&self, op: ReduceOp, layout: &Layout, s: &[usize]) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Metal(storage)) } } } pub(crate) fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Metal(storage)) } } } pub(crate) fn apply_op1(&self, l: &Layout, c: &dyn CustomOp1) -> Result<(Self, Shape)> { match self { Self::Cpu(storage) => { let (storage, shape) = c.cpu_fwd(storage, l)?; Ok((Self::Cpu(storage), shape)) } Self::Cuda(storage) => { let (storage, shape) = c.cuda_fwd(storage, l)?; Ok((Self::Cuda(storage), shape)) } Self::Metal(storage) => { let (storage, shape) = c.metal_fwd(storage, l)?; Ok((Self::Metal(storage), shape)) } } } pub(crate) fn apply_op2( &self, l1: &Layout, t2: &Self, l2: &Layout, c: &dyn CustomOp2, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; match (self, t2) { (Self::Cpu(s1), Self::Cpu(s2)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn apply_op3( &self, l1: &Layout, t2: &Self, l2: &Layout, t3: &Self, l3: &Layout, c: &dyn CustomOp3, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; self.same_device(t3, c.name())?; match (self, t2, t3) { (Self::Cpu(s1), Self::Cpu(s2), Self::Cpu(s3)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2), Self::Cuda(s3)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2), Self::Metal(s3)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn unary_impl<B: op::UnaryOpT>(&self, layout: &Layout) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Metal(storage)) } } } pub(crate) fn binary_impl<B: op::BinaryOpT>( &self, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, B::NAME)?; self.same_dtype(rhs, B::NAME)?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; 
Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: B::NAME, } .bt()) } } } pub(crate) fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { self.same_device(kernel, "conv1d")?; self.same_dtype(kernel, "conv1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv1d", } .bt()), } } pub(crate) fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { self.same_device(kernel, "conv-transpose1d")?; self.same_dtype(kernel, "conv-transpose1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv-transpose1d", } .bt()), } } pub(crate) fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { self.same_device(kernel, "conv2d")?; self.same_dtype(kernel, "conv2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv2d", } .bt()), } } pub(crate) fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { self.same_device(kernel, "conv_transpose2d")?; self.same_dtype(kernel, "conv_transpose2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv_transpose2d", } .bt()), } } pub(crate) fn avg_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Cuda(storage)) } 
Self::Metal(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Metal(storage)) } } } pub(crate) fn max_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.max_pool2d(layout, kernel_size, stride)?; Ok(Self::Metal(storage)) } } } pub(crate) fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.upsample_nearest1d(layout, sz)?; Ok(Self::Metal(storage)) } } } pub(crate) fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.upsample_nearest2d(layout, h, w)?; Ok(Self::Metal(storage)) } } } pub(crate) fn where_cond( &self, layout: &Layout, t: &Self, layout_t: &Layout, f: &Self, layout_f: &Layout, ) -> Result<Self> { self.same_device(t, "where")?; self.same_device(f, "where")?; t.same_dtype(f, "where")?; match (self, t, f) { (Storage::Cpu(cond), Storage::Cpu(t), Storage::Cpu(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Cpu(storage)) } (Self::Cuda(cond), Self::Cuda(t), Self::Cuda(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Cuda(storage)) } (Self::Metal(cond), Self::Metal(t), Self::Metal(f)) => { let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?; Ok(Self::Metal(storage)) } (_, lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "where", } .bt()), } } pub(crate) fn gather( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "index-add")?; match (self, indexes) { (Self::Cpu(s), Self::Cpu(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes)) => { let storage = s.gather(l, indexes, indexes_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn scatter_add( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, source: &Self, source_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "scatter-add")?; self.same_device(source, "scatter-add")?; match (self, indexes, source) { (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => { let storage = s.scatter_add(l, indexes, indexes_l, 
source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_add( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, source: &Self, source_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "index-add")?; self.same_device(source, "index-add")?; match (self, indexes, source) { (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_select( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(rhs, "index-select")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "index-select", } .bt()), } } pub(crate) fn matmul( &self, rhs: &Self, bmnk: (usize, usize, usize, usize), lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "matmul")?; self.same_dtype(rhs, "matmul")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "matmul", } .bt()), } } // self, the source can be strided whereas dst is contiguous. pub(crate) fn copy_strided_src( &self, dst: &mut Self, dst_offset: usize, src_l: &Layout, ) -> Result<()> { match (self, dst) { (Self::Cpu(src), Self::Cpu(dst)) => src.copy_strided_src(dst, dst_offset, src_l), (Self::Cuda(src), Self::Cuda(dst)) => Ok(src.copy_strided_src(dst, dst_offset, src_l)?), (Self::Metal(src), Self::Metal(dst)) => { Ok(src.copy_strided_src(dst, dst_offset, src_l)?) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "copy", } .bt()), } } }
candle/candle-core/src/storage.rs/0
{ "file_path": "candle/candle-core/src/storage.rs", "repo_id": "candle", "token_count": 13775 }
19
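`Storage` above follows one consistent pattern: an enum with one variant per backend (CPU, CUDA, Metal), and methods that first check device/dtype agreement, then match on the variant pair and delegate to the backend implementation, returning a `DeviceMismatchBinaryOp` error for heterogeneous pairs. The miniature below is a sketch of that dispatch shape in isolation (my own toy types, not candle's), which may help when reading the longer match arms.

// Toy version of the backend-dispatch pattern used by candle's `Storage`.
#[derive(Debug)]
enum MiniStorage {
    Cpu(Vec<f32>),
    Gpu(u64), // stand-in for a device buffer handle
}

#[derive(Debug)]
enum MiniError {
    DeviceMismatch { op: &'static str },
}

impl MiniStorage {
    fn add(&self, rhs: &Self, op: &'static str) -> Result<MiniStorage, MiniError> {
        match (self, rhs) {
            // Homogeneous pair: delegate to the backend implementation.
            (Self::Cpu(a), Self::Cpu(b)) => {
                Ok(Self::Cpu(a.iter().zip(b).map(|(x, y)| x + y).collect()))
            }
            (Self::Gpu(_), Self::Gpu(_)) => unimplemented!("a real backend would launch a kernel"),
            // Heterogeneous pair: report a device mismatch, as Storage does.
            _ => Err(MiniError::DeviceMismatch { op }),
        }
    }
}

fn main() {
    let a = MiniStorage::Cpu(vec![1.0, 2.0]);
    let b = MiniStorage::Cpu(vec![3.0, 4.0]);
    println!("{:?}", a.add(&b, "add")); // Ok(Cpu([4.0, 6.0]))
}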
use candle_core::{test_device, test_utils, DType, Device, IndexOp, Result, Tensor, D}; fn zeros(device: &Device) -> Result<()> { let tensor = Tensor::zeros((5, 2), DType::F32, device)?; let (dim1, dim2) = tensor.dims2()?; assert_eq!(dim1, 5); assert_eq!(dim2, 2); Ok(()) } fn ones(device: &Device) -> Result<()> { assert_eq!( Tensor::ones((2, 3), DType::U8, device)?.to_vec2::<u8>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::U32, device)?.to_vec2::<u32>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::I64, device)?.to_vec2::<i64>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::F32, device)?.to_vec2::<f32>()?, [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ); assert_eq!( Tensor::ones((2, 3), DType::F64, device)?.to_vec2::<f64>()?, [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ); Ok(()) } fn full(device: &Device) -> Result<()> { assert_eq!( Tensor::full(42u32, (2, 3), device)?.to_vec2::<u32>()?, [[42, 42, 42], [42, 42, 42]], ); Ok(()) } fn arange(device: &Device) -> Result<()> { assert_eq!( Tensor::arange(0u8, 5u8, device)?.to_vec1::<u8>()?, [0, 1, 2, 3, 4], ); assert_eq!( Tensor::arange_step(0u8, 5u8, 2, device)?.to_vec1::<u8>()?, [0, 2, 4], ); assert_eq!( Tensor::arange_step(0u8, 5u8, 3, device)?.to_vec1::<u8>()?, [0, 3], ); assert_eq!( Tensor::arange_step(5i64, 0i64, -1, device)?.to_vec1::<i64>()?, [5, 4, 3, 2, 1], ); Ok(()) } fn add_mul(device: &Device) -> Result<()> { let tensor = Tensor::new(&[3f32, 1., 4.], device)?; let dim1 = tensor.dims1()?; assert_eq!(dim1, 3); let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [3., 1., 4.]); let tensor = Tensor::add(&tensor, &tensor)?; let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [6., 2., 8.]); let tensor = Tensor::mul(&tensor, &tensor)?; let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [36., 4., 64.]); Ok(()) } fn tensor_2d(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?; let dims = tensor.dims2()?; assert_eq!(dims, (2, 5)); let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content, data); Ok(()) } fn clamp(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?; let tensor = tensor.clamp(1.5, 6.2)?; assert_eq!( tensor.to_vec2::<f32>()?, [[3.0, 1.5, 4.0, 1.5, 5.0], [2.0, 1.5, 6.2, 6.2, 2.0]], ); Ok(()) } fn unary_op(device: &Device) -> Result<()> { let data = &[[-3f32, 1., 4., -0.1, 0.5], [2.7, -1.8, -0.28, 1.8, 2.8]]; let tensor = Tensor::new(data, device)?; assert_eq!( test_utils::to_vec2_round(&tensor.gelu()?, 4)?, [ [-0.0036, 0.8412, 3.9999, -0.046, 0.3457], [2.6911, -0.0647, -0.1091, 1.7353, 2.7933] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.gelu_erf()?, 4)?, [ [-0.004, 0.8413, 3.9999, -0.046, 0.3457], [2.6906, -0.0647, -0.1091, 1.7353, 2.7928] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.erf()?, 4)?, [ [-1.0, 0.8427, 1.0, -0.1125, 0.5205], [0.9999, -0.9891, -0.3079, 0.9891, 0.9999] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.ceil()?, 4)?, [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -1.0, -0.0, 2.0, 3.0]] ); assert_eq!( test_utils::to_vec2_round(&tensor.floor()?, 4)?, [[-3.0, 1.0, 4.0, -1.0, 0.0], [2.0, -2.0, -1.0, 1.0, 2.0]] ); assert_eq!( test_utils::to_vec2_round(&tensor.round()?, 4)?, [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -2.0, -0.0, 2.0, 3.0]] ); let tensor = Tensor::new(&[2997.9246, 314.15926f32], device)?; assert_eq!( test_utils::to_vec1_round(&tensor.round_to(2)?, 
4)?, [2997.92, 314.16] ); assert_eq!( test_utils::to_vec1_round(&tensor.round_to(-2)?, 4)?, [3000.0, 300.] ); Ok(()) } fn binary_op(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor1 = Tensor::new(data, device)?; let data2 = &[[5f32, 5., 5., 5., 5.], [2., 1., 7., 8., 2.]]; let tensor2 = Tensor::new(data2, device)?; let tensor = (&tensor1 + (&tensor1 * &tensor1)? / (&tensor1 + &tensor2))?; let dims = tensor.dims2()?; assert_eq!(dims, (2, 5)); let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content[0], [4.125, 1.1666666, 5.7777777, 1.1666666, 7.5]); assert_eq!(content[1], [3.0, 1.5, 10.5, 12.0, 3.0]); #[allow(clippy::eq_op)] let tensor = (&tensor - &tensor)?; let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content[0], [0., 0., 0., 0., 0.]); let min = tensor1.minimum(&(&tensor2 * 0.5)?)?; let max = tensor1.maximum(&(&tensor2 * 0.5)?)?; assert_eq!( min.to_vec2::<f32>()?, [[2.5, 1.0, 2.5, 1.0, 2.5], [1.0, 0.5, 3.5, 4.0, 1.0]], ); assert_eq!( max.to_vec2::<f32>()?, [[3.0, 2.5, 4.0, 2.5, 5.0], [2.0, 1.0, 7.0, 8.0, 2.0]] ); Ok(()) } fn transpose(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?.t()?; let dims = tensor.dims2()?; assert_eq!(dims, (5, 2)); assert_eq!( tensor.to_vec2::<f32>()?, &[[3f32, 2.], [1., 1.], [4., 7.], [1., 8.], [5., 2.]] ); assert_eq!(tensor.t()?.to_vec2::<f32>()?, data); assert_eq!(tensor.contiguous()?.t()?.to_vec2::<f32>()?, data); assert_eq!(((tensor + 1.)?.t()? - 1.)?.to_vec2::<f32>()?, data); Ok(()) } fn var(device: &Device) -> Result<()> { // Values taken from https://pytorch.org/docs/stable/generated/torch.var.html let data = &[ [0.2035f32, 1.2959, 1.8101, -0.4644], [1.5027, -0.3270, 0.5905, 0.6538], [-1.5745, 1.3330, -0.5596, -0.6548], [0.1264, -0.5080, 1.6420, 0.1992], ]; let tensor = Tensor::new(data, device)?; assert_eq!( test_utils::to_vec2_round(&tensor.var_keepdim(1)?, 4)?, &[[1.0631], [0.559], [1.4893], [0.8258]] ); Ok(()) } fn sum(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.sum_keepdim(2)?.to_vec3::<u32>()?, &[[[8], [15]], [[10], [18]]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec3::<u32>()?, &[[[5, 2, 11], [9, 7, 17]]], ); assert_eq!(tensor.sum_keepdim((0, 2, 1))?.to_vec3::<u32>()?, &[[[51]]],); assert_eq!( tensor.t()?.sum_keepdim(1)?.t()?.to_vec3::<u32>()?, &[[[8], [15]], [[10], [18]]] ); assert_eq!( tensor.sum_keepdim((2, 1))?.to_vec3::<u32>()?, &[[[8 + 15]], [[10 + 18]]] ); let data: Vec<u32> = (0..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.sum_keepdim(0)?.to_vec1::<u32>()?, &[7998000]); let tensor = tensor.reshape((2000, 2))?; assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]); assert_eq!( tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec2::<u32>()?, &[[3998000, 4000000]] ); // Make the tensor non contiguous. 
let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]); assert_eq!( tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec2::<u32>()?, &[[3998000, 4000000]] ); let t1 = tensor.reshape((200, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor.sum_keepdim((0, 1, 2))?.to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(0)? .sum_keepdim(2)? .sum_keepdim(1)? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(0)? .sum_keepdim((1, 2))? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(1)? .sum_keepdim((0, 2))? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec3::<u32>()?, &[[ [398000, 398200, 398400, 398600], [398800, 399000, 399200, 399400], [399600, 399800, 400000, 400200], [400400, 400600, 400800, 401000], [401200, 401400, 401600, 401800] ]] ); } Ok(()) } fn min(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.min_keepdim(2)?.to_vec3::<u32>()?, &[[[1], [1]], [[1], [2]]] ); assert_eq!( tensor.min_keepdim(0)?.to_vec3::<u32>()?, &[[[2, 1, 4], [1, 2, 8]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.min_keepdim(0)?.to_vec1::<u32>()?, &[200]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!( tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!( tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .min_keepdim(0)? .min_keepdim(2)? .min_keepdim(1)? .to_vec3::<u32>()?, &[[[200]]] ); assert_eq!( tensor.min_keepdim(0)?.to_vec3::<u32>()?, &[[ [200, 201, 202, 203], [204, 205, 206, 207], [208, 209, 210, 211], [212, 213, 214, 215], [216, 217, 218, 219] ]] ); } Ok(()) } fn max(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.max_keepdim(2)?.to_vec3::<u32>()?, &[[[4], [9]], [[7], [8]]] ); assert_eq!( tensor.max_keepdim(0)?.to_vec3::<u32>()?, &[[[3, 1, 7], [8, 5, 9]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.max_keepdim(0)?.to_vec1::<u32>()?, &[3999]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!( tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]); // Make the tensor non contiguous. 
let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!( tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .max_keepdim(0)? .max_keepdim(2)? .max_keepdim(1)? .to_vec3::<u32>()?, &[[[3999]]] ); assert_eq!( tensor.max_keepdim(0)?.to_vec3::<u32>()?, &[[ [3980, 3981, 3982, 3983], [3984, 3985, 3986, 3987], [3988, 3989, 3990, 3991], [3992, 3993, 3994, 3995], [3996, 3997, 3998, 3999] ]] ); } Ok(()) } fn argmin(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.argmin_keepdim(2)?.to_vec3::<u32>()?, &[[[1], [0]], [[1], [1]]] ); assert_eq!( tensor.argmin_keepdim(0)?.to_vec3::<u32>()?, &[[[1, 0, 0], [0, 1, 1]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.argmin_keepdim(0)?.to_vec1::<u32>()?, &[0]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmin_keepdim(1)? .argmin_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmin_keepdim(1)? .argmin_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(2)? .argmin_keepdim(1)? .to_vec3::<u32>()?, &[[[0]]] ); assert_eq!( tensor.argmin_keepdim(0)?.to_vec3::<u32>()?, &[[ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], ]] ); } Ok(()) } fn argmax(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.argmax_keepdim(2)?.to_vec3::<u32>()?, &[[[2], [2]], [[2], [0]]] ); assert_eq!( tensor.argmax_keepdim(0)?.to_vec3::<u32>()?, &[[[0, 0, 1], [1, 0, 0]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.argmax_keepdim(0)?.to_vec1::<u32>()?, &[3799]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmax_keepdim(1)? .argmax_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmax_keepdim(1)? .argmax_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(2)? .argmax_keepdim(1)? 
.to_vec3::<u32>()?, &[[[0]]] ); assert_eq!( tensor.argmax_keepdim(0)?.to_vec3::<u32>()?, &[[ [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], ]] ); } Ok(()) } fn narrow(device: &Device) -> Result<()> { let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.narrow(2, 1, 2)?.to_vec3::<f32>()?, &[[[1.0, 4.0], [5.0, 9.0]], [[1.0, 7.0], [2.0, 8.0]]], ); assert_eq!( tensor.narrow(1, 1, 1)?.to_vec3::<f32>()?, &[[[1.0, 5.0, 9.0]], [[8.0, 2.0, 8.0]]], ); assert_eq!( tensor.narrow(0, 0, 1)?.to_vec3::<f32>()?, &[[[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]], ); assert_eq!( tensor.narrow(0, 1, 1)?.to_vec3::<f32>()?, &[[[2.0, 1.0, 7.0], [8.0, 2.0, 8.0]]], ); // The following has been checked against PyTorch via: // import torch // t = torch.tensor([[[3., 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]]) // t.transpose(-1, -2).narrow(1, 1, 2) assert_eq!( tensor.t()?.narrow(1, 1, 2)?.to_vec3::<f32>()?, &[[[1.0, 5.0], [4.0, 9.0]], [[1.0, 2.0], [7.0, 8.0]]], ); Ok(()) } fn broadcast(device: &Device) -> Result<()> { let data = &[3f32, 1., 4.]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.broadcast_left((3, 1))?.to_vec3::<f32>()?, &[[[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]]] ); Ok(()) } fn cat(device: &Device) -> Result<()> { // 1D let t1 = Tensor::new(&[3f32, 1., 4.], device)?; let t2 = Tensor::new(&[1f32, 5., 9., 2.], device)?; let t3 = Tensor::new(&[6f32, 5., 3., 5., 8., 9.], device)?; assert_eq!(Tensor::cat(&[&t1], 0)?.to_vec1::<f32>()?, [3f32, 1., 4.],); assert_eq!( Tensor::cat(&[&t1, &t2], 0)?.to_vec1::<f32>()?, [3f32, 1., 4., 1., 5., 9., 2.], ); assert_eq!( Tensor::cat(&[&t1, &t2, &t3], 0)?.to_vec1::<f32>()?, [3f32, 1., 4., 1., 5., 9., 2., 6., 5., 3., 5., 8., 9.], ); // 2D let data = &[[3f32, 1., 4., 1., 5.], [2., 7., 1., 8., 2.]]; let t1 = Tensor::new(data, device)?; let data2 = &[[5f32, 5., 5., 5., 5.], [2., 7., 1., 8., 2.]]; let t2 = Tensor::new(data2, device)?; assert_eq!( Tensor::cat(&[&t1, &t2], 0)?.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0], [5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0] ] ); // PyTorch equivalent: // import torch // t1 = torch.tensor([[3, 1, 4, 1, 5], [2, 7, 1, 8, 2]]) // t2 = torch.tensor([[5]*5, [2, 7, 1, 8, 2]]) // torch.cat([t1.t(), t2.t()], dim=1).t() assert_eq!( Tensor::cat(&[&t1.t()?, &t2.t()?], 1)? .t()? 
.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0], [5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0] ] ); assert_eq!( Tensor::cat(&[&t1, &t2], 1)?.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0, 2.0, 7.0, 1.0, 8.0, 2.0] ] ); Ok(()) } fn embeddings(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?; let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?; let hs = t.embedding(&ids)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]); let hs = t.index_select(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]); Ok(()) } fn cmp(device: &Device) -> Result<()> { let t1 = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?; let t2 = Tensor::new(&[[1f32, 0f32], [3f32, 3f32], [4f32, 7f32]], device)?; assert_eq!(t1.eq(&t2)?.to_vec2::<u8>()?, &[[0, 0], [0, 1], [1, 0]]); assert_eq!(t1.ne(&t2)?.to_vec2::<u8>()?, &[[1, 1], [1, 0], [0, 1]]); assert_eq!(t1.le(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 1], [1, 1]]); assert_eq!(t1.lt(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 0], [0, 1]]); assert_eq!(t1.gt(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 0], [0, 0]]); assert_eq!(t1.ge(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 1], [1, 0]]); Ok(()) } fn index_select(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let hs = t.index_select(&ids, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [0.0, 2.0, 1.0], [3.0, 5.0, 4.0], [6.0, 8.0, 7.0], [9.0, 11.0, 10.0] ] ); let hs = t.index_select(&ids, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[[0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0]] ); // Prior to https://github.com/huggingface/candle/pull/1022 // There would be a bug where the last values in the result tensor would be set to 0. let ids = Tensor::new(&[0u32, 2u32, 1u32, 0u32, 2u32, 1u32], device)?; let hs = t.index_select(&ids, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0], [0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0], ] ); // Test when selecting dim > 0 with ids size different from elem count of // target dim in source/input. 
let ids = Tensor::new(&[1u32, 0u32, 1u32], device)?; let t = Tensor::arange(1f32, 5f32, device)?.reshape((2, 2))?; assert_eq!(t.to_vec2::<f32>()?, &[[1.0, 2.0], [3.0, 4.0]]); let hs = t.index_select(&ids, 1)?; assert_eq!(hs.to_vec2::<f32>()?, &[[2.0, 1.0, 2.0], [4.0, 3.0, 4.0]]); Ok(()) } fn index_add(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 1u32, 1u32], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let init = Tensor::ones((4, 2), DType::F32, device)?; let hs = init.index_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[1.0, 4.0], [4.0, 10.0], [7.0, 16.0], [10.0, 22.0]], ); let init = Tensor::zeros((4, 2), DType::F32, device)?; let ids = Tensor::new(&[1u32, 0u32, 0u32], device)?; let hs = init.index_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[3.0, 0.0], [9.0, 3.0], [15.0, 6.0], [21.0, 9.0]], ); let init = Tensor::zeros((6, 3), DType::F32, device)?; let ids = Tensor::new(&[5u32, 0u32, 1u32, 0u32], device)?; let hs = init.index_add(&ids, &t, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [12.0, 14.0, 16.0], [6.0, 7.0, 8.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 2.0] ] ); Ok(()) } fn slice_scatter(device: &Device) -> Result<()> { let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let src = Tensor::arange(100f32, 106f32, device)?.reshape((2, 3))?; assert_eq!( t.slice_scatter0(&src, 0)?.to_vec2::<f32>()?, &[ [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); assert_eq!( t.slice_scatter0(&src, 1)?.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], [9.0, 10.0, 11.0] ] ); assert_eq!( t.slice_scatter0(&src, 2)?.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], ] ); Ok(()) } fn scatter_add(device: &Device) -> Result<()> { let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let ids = Tensor::new(&[[0u32, 1, 2], [3, 4, 0], [3, 3, 1], [2, 0, 4]], device)?; let init = Tensor::ones((4, 5), DType::F32, device)?; let hs = init.scatter_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [1.0, 2.0, 3.0, 1.0, 1.0], [6.0, 1.0, 1.0, 4.0, 5.0], [1.0, 9.0, 1.0, 14.0, 1.0], [11.0, 1.0, 10.0, 1.0, 12.0] ] ); let init = Tensor::ones((6, 3), DType::F32, device)?; let hs = init.scatter_add(&ids, &t, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [1.0, 11.0, 6.0], [1.0, 2.0, 9.0], [10.0, 1.0, 3.0], [10.0, 8.0, 1.0], [1.0, 5.0, 12.0], [1.0, 1.0, 1.0] ] ); Ok(()) } fn gather(device: &Device) -> Result<()> { let ids = Tensor::new(&[[0u32], [2u32], [1u32], [0u32]], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let hs = t.gather(&ids, 1)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0], [5.0], [7.0], [9.0]]); let ids = Tensor::new( &[[0u32, 0u32], [2u32, 0u32], [1u32, 1u32], [0u32, 2u32]], device, )?; let hs = t.gather(&ids, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[0.0, 0.0], [5.0, 3.0], [7.0, 7.0], [9.0, 11.0]] ); let ids = Tensor::new(&[[0u32, 2u32, 0u32]], device)?; let hs = t.gather(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 
2.0]]); let ids = Tensor::new(&[[0u32, 2u32, 0u32], [0u32, 1u32, 1u32]], device)?; let hs = t.gather(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0], [0.0, 4.0, 5.0]]); Ok(()) } fn matmul(device: &Device) -> Result<()> { let data = vec![1.0f32, 2.0, 3.0, 4.0]; let a = Tensor::from_slice(&data, (2, 2), device)?; let data = vec![1.0f32, 2.0, 3.0, 4.0]; let b = Tensor::from_slice(&data, (2, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]); let data = vec![1.0f32, 2.0]; let a = Tensor::from_slice(&data, (2, 1), device)?; let data = vec![3.0f32, 4.0]; let b = Tensor::from_slice(&data, (1, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[3.0, 4.0], &[6.0, 8.0]]); let data: Vec<_> = (0..6).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 3), device)?; let data: Vec<_> = (0..6).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (3, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[16., 19.], &[52., 64.]]); let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 2, 3), device)?; let data: Vec<_> = (0..12).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (2, 3, 2), device)?; let expected = [[[16., 19.], [52., 64.]], [[214., 235.], [304., 334.]]]; let c = a.matmul(&b)?; assert_eq!(c.to_vec3::<f32>()?, &expected); // Also perform the matmul on contiguous transposed versions. let a_tt = a.t()?.contiguous()?.t()?; assert!(!a_tt.is_contiguous()); assert_eq!(a.dims(), a_tt.dims()); assert_eq!(a_tt.stride(), &[6, 1, 2]); let b_tt = b.t()?.contiguous()?.t()?; assert!(!b_tt.is_contiguous()); assert_eq!(b.dims(), b_tt.dims()); assert_eq!(b_tt.stride(), &[6, 1, 3]); assert_eq!(a_tt.matmul(&b)?.to_vec3::<f32>()?, &expected); assert_eq!(a.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); assert_eq!(a_tt.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); Ok(()) } fn broadcast_matmul(device: &Device) -> Result<()> { let lhs = Tensor::randn(0f32, 1f32, (3, 1, 4, 5), device)?; let rhs = Tensor::randn(0f32, 1f32, (6, 5, 2), device)?; let out = lhs.broadcast_matmul(&rhs)?; assert_eq!(out.dims(), &[3, 6, 4, 2]); for idx1 in 0..3 { for idx2 in 0..6 { let out = out.i((idx1, idx2))?; let lhs = lhs.i((idx1, 0))?; let rhs = rhs.i(idx2)?; let out2 = lhs.matmul(&rhs); let sum_diff2 = (out - out2)?.sqr()?.sum_all()?; // With cuda, we see errors of up to ~1e-12. assert!(sum_diff2.to_vec0::<f32>()? 
< 1e-6) } } Ok(()) } fn broadcasting(device: &Device) -> Result<()> { let t1 = Tensor::arange(0f32, 24f32, device)?.reshape((4, 2, 3))?; let t2 = Tensor::new(&[100f32, 200f32], device)?; let s = t1.broadcast_add(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[100.0, 101.0, 102.0], [203.0, 204.0, 205.0]], [[106.0, 107.0, 108.0], [209.0, 210.0, 211.0]], [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]], [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]] ] ); let s = t1.t()?.broadcast_add(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[100.0, 203.0], [101.0, 204.0], [102.0, 205.0]], [[106.0, 209.0], [107.0, 210.0], [108.0, 211.0]], [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]], [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]] ] ); let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-100.0, -99.0, -98.0], [-197.0, -196.0, -195.0]], [[-94.0, -93.0, -92.0], [-191.0, -190.0, -189.0]], [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]], [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]] ] ); let s = t1.t()?.broadcast_sub(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-100.0, -197.0], [-99.0, -196.0], [-98.0, -195.0]], [[-94.0, -191.0], [-93.0, -190.0], [-92.0, -189.0]], [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]], [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]] ] ); // Test a narrowed version as this uses a layout start_offset. let t1 = t1.i(2..)?; let s = t1.broadcast_add(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]], [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]] ] ); let s = t1.t()?.broadcast_add(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]], [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]] ] ); let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]], [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]] ] ); let s = t1.t()?.broadcast_sub(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]], [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]] ] ); let t3 = Tensor::new(1f32, device)?.broadcast_div(&t2)?; let s = t1.broadcast_mul(&t2.reshape((2, 1))?)?; let s_div = t1.broadcast_div(&t3.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[1200.0, 1300.0, 1400.0], [3000.0, 3200.0, 3400.0]], [[1800.0, 1900.0, 2000.0], [4200.0, 4400.0, 4600.0]] ] ); assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,); let s = t1.t()?.broadcast_mul(&t2)?; let s_div = t1.t()?.broadcast_div(&t3)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[1200.0, 3000.0], [1300.0, 3200.0], [1400.0, 3400.0]], [[1800.0, 4200.0], [1900.0, 4400.0], [2000.0, 4600.0]] ] ); assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,); Ok(()) } fn randn(device: &Device) -> Result<()> { let tensor = Tensor::randn(0f32, 1f32, (5, 3), device)?; assert_eq!(tensor.dims(), [5, 3]); let tensor = Tensor::rand(0f32, 1f32, (5, 3), device)?; assert_eq!(tensor.dims(), [5, 3]); Ok(()) } test_device!(zeros, zeros_cpu, zeros_gpu, zeros_metal); test_device!(ones, ones_cpu, ones_gpu, ones_metal); test_device!(full, full_cpu, full_gpu, full_metal); test_device!(arange, arange_cpu, arange_gpu, arange_metal); test_device!(add_mul, add_mul_cpu, add_mul_gpu, add_mul_metal); test_device!(tensor_2d, tensor_2d_cpu, tensor_2d_gpu, tensor_2d_metal); test_device!(narrow, narrow_cpu, narrow_gpu, narrow_metal); test_device!(broadcast, broadcast_cpu, broadcast_gpu, broadcast_metal); test_device!(cat, 
cat_cpu, cat_gpu, cat_metal); test_device!(sum, sum_cpu, sum_gpu, sum_metal); test_device!(min, min_cpu, min_gpu, min_metal); test_device!(max, max_cpu, max_gpu, max_metal); test_device!(argmax, argmax_cpu, argmax_gpu, argmax_metal); test_device!(argmin, argmin_cpu, argmin_gpu, argmin_metal); test_device!(transpose, transpose_cpu, transpose_gpu, transpose_metal); test_device!(unary_op, unary_op_cpu, unary_op_gpu, unary_op_metal); test_device!(binary_op, binary_op_cpu, binary_op_gpu, binary_op_metal); test_device!(embeddings, embeddings_cpu, embeddings_gpu, embeddings_metal); test_device!(cmp, cmp_cpu, cmp_gpu, cmp_metal); test_device!(matmul, matmul_cpu, matmul_gpu, matmul_metal); test_device!( broadcast_matmul, broadcast_matmul_cpu, broadcast_matmul_gpu, broadcast_matmul_metal ); test_device!( broadcasting, broadcasting_cpu, broadcasting_gpu, broadcasting_metal ); test_device!( index_select, index_select_cpu, index_select_gpu, index_select_metal ); test_device!(index_add, index_add_cpu, index_add_gpu, index_add_metal); test_device!(gather, gather_cpu, gather_gpu, gather_metal); test_device!( scatter_add, scatter_add_cpu, scatter_add_gpu, scatter_add_metal ); test_device!( slice_scatter, slice_scatter_cpu, slice_scatter_gpu, slice_scatter_metal ); test_device!(randn, randn_cpu, randn_gpu, randn_metal); test_device!(clamp, clamp_cpu, clamp_gpu, clamp_metal); test_device!(var, var_cpu, var_gpu, var_metal); // There was originally a bug on the CPU implementation for randn // https://github.com/huggingface/candle/issues/381 #[test] fn randn_hasneg() -> Result<()> { let t = Tensor::randn(0f32, 1f32, 200, &Device::Cpu)?.to_vec1::<f32>()?; if t.iter().all(|&v| v >= 0.) { candle_core::bail!("all values in tensors are non-negative") } Ok(()) } #[test] fn pad_with_same() -> Result<()> { let t = Tensor::arange(1f32, 5f32, &Device::Cpu)?.reshape((2, 2))?; let t0 = t.pad_with_same(0, 1, 2)?; assert_eq!( t0.to_vec2::<f32>()?, [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0], [3.0, 4.0], [3.0, 4.0]] ); let t1 = t.pad_with_same(1, 1, 2)?; assert_eq!( t1.to_vec2::<f32>()?, [[1.0, 1.0, 2.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 4.0]] ); Ok(()) } #[test] fn i64_abs() -> Result<()> { let t = Tensor::new(&[-42i64, 1337], &Device::Cpu)?; let t = t.abs()?; assert_eq!(t.to_vec1::<i64>()?, [42, 1337]); Ok(()) } #[test] fn tril_triu_eye() -> Result<()> { let t = Tensor::tril2(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0] ], ); let t = Tensor::triu2(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0] ] ); let t = Tensor::eye(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0] ] ); Ok(()) } #[test] fn cumsum() -> Result<()> { let t = &[3f32, 1., 4., 1., 5.]; let t = Tensor::new(t, &Device::Cpu)?; assert_eq!(t.cumsum(0)?.to_vec1::<f32>()?, [3., 4., 8., 9., 14.]); let t = t.unsqueeze(1)?; assert_eq!( t.cumsum(0)?.to_vec2::<f32>()?, [[3.0], [4.0], [8.0], [9.0], [14.0]] ); assert_eq!( t.cumsum(1)?.to_vec2::<f32>()?, [[3.0], [1.0], [4.0], [1.0], [5.0]] ); let t = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let t = Tensor::new(t, &Device::Cpu)?; assert_eq!( t.cumsum(1)?.to_vec2::<f32>()?, [[3.0, 4.0, 8.0, 9.0, 14.0], [2.0, 3.0, 10.0, 18.0, 20.0]], ); assert_eq!( t.cumsum(0)?.to_vec2::<f32>()?, [[3.0, 1.0, 4.0, 1.0, 5.0], [5.0, 2.0, 
11.0, 9.0, 7.0]] ); Ok(()) } /// A helper function for floating point comparison. Both a and b must be 1D Tensor and contains the same amount of data. /// Assertion passes if the difference of all pairs of a and b is smaller than epsilon. fn assert_close(a: &Tensor, b: &Tensor, epsilon: f64) -> Result<()> { let a_vec: Vec<f64> = a.to_vec1()?; let b_vec: Vec<f64> = b.to_vec1()?; assert_eq!(a_vec.len(), b_vec.len()); for (a, b) in a_vec.iter().zip(b_vec.iter()) { assert!((a - b).abs() < epsilon); } Ok(()) } #[test] fn log_sum_exp() -> Result<()> { let input = Tensor::new(&[[1f64, 2., 3.], [4., 5., 6.]], &Device::Cpu)?; let output = input.log_sum_exp(D::Minus1)?; // The expectations obtained from pytorch. let expected = Tensor::new(&[3.4076, 6.4076], &Device::Cpu)?; assert_close(&output, &expected, 0.00001)?; Ok(()) } #[test] fn pow() -> Result<()> { let lhs = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &Device::Cpu)?; let rhs = (&lhs - 2.)?; let res = lhs.pow(&rhs)?; assert_eq!( test_utils::to_vec2_round(&res, 4)?, [[1.0, 1.0, 3.0], [16.0, 125.0, 1296.0001]] ); Ok(()) }
candle/candle-core/tests/tensor_tests.rs/0
{ "file_path": "candle/candle-core/tests/tensor_tests.rs", "repo_id": "candle", "token_count": 23779 }
20
# candle-bert Bert is a general large language model. In this example it can be used for two different tasks: - Compute sentence embeddings for a prompt. - Compute similarities between a set of sentences. ## Sentence embeddings Bert is used to compute the sentence embeddings for a prompt. The model weights are downloaded from the hub on the first run. ```bash cargo run --example bert --release -- --prompt "Here is a test sentence" > [[[ 0.0798, -0.0665, -0.0247, ..., -0.1082, -0.1000, -0.2751], > [ 0.4218, 0.2690, 0.2740, ..., 0.3889, 1.3503, 0.9908], > [ 0.0466, 0.3041, -0.1143, ..., 0.4427, 0.6926, -0.1515], > ... > [ 0.3396, 0.4320, -0.4408, ..., 0.9212, 0.2331, -0.6777], > [ 0.2789, 0.7539, 0.4306, ..., -0.0095, 0.3375, -1.7529], > [ 0.6737, 0.7882, 0.0548, ..., 0.1836, 0.7299, -0.6617]]] > Tensor[[1, 7, 384], f32] ``` ### Custom models You can specify different models, such as BGE, with the `--model-id` flag: ```bash cargo run --example bert --release -- \ --model-id BAAI/bge-large-zh-v1.5 \ --prompt "Here is a test sentence" Loaded and encoded 435.70775ms [[[ 3.0944e-1, -7.8455e-5, -1.2768e0, ..., 1.3755e-2, -3.2371e-1, 2.3819e-1], [-2.8506e-1, 1.9953e-1, -1.3076e0, ..., 6.9819e-2, 1.0833e-2, -1.1512e0], [ 3.9892e-1, 2.0000e-1, -9.3178e-1, ..., -4.1393e-1, -4.9644e-2, -3.3786e-1], ... [ 6.0345e-1, 3.5744e-1, -1.2672e0, ..., -6.9165e-1, -3.4973e-3, -8.4214e-1], [ 3.9218e-1, -3.2735e-1, -1.3123e0, ..., -4.9318e-1, -5.1334e-1, -3.6391e-1], [ 3.0978e-1, 2.5662e-4, -1.2773e0, ..., 1.3357e-2, -3.2390e-1, 2.3858e-1]]] Tensor[[1, 9, 1024], f32] Took 176.744667ms ``` ### Gelu approximation You can get a speedup by using an approximation of the gelu activation, with a small loss of precision, by passing the `--approximate-gelu` flag: ```bash $ cargo run --example bert --release -- \ --model-id BAAI/bge-large-zh-v1.5 \ --prompt "Here is a test sentence" \ --approximate-gelu Loaded and encoded 244.388042ms [[[ 3.1048e-1, -6.0339e-4, -1.2758e0, ..., 1.3718e-2, -3.2362e-1, 2.3775e-1], [-2.8354e-1, 1.9984e-1, -1.3077e0, ..., 6.9390e-2, 9.9681e-3, -1.1531e0], [ 3.9947e-1, 1.9917e-1, -9.3178e-1, ..., -4.1301e-1, -5.0719e-2, -3.3955e-1], ... [ 6.0499e-1, 3.5664e-1, -1.2642e0, ..., -6.9134e-1, -3.4581e-3, -8.4471e-1], [ 3.9311e-1, -3.2812e-1, -1.3105e0, ..., -4.9291e-1, -5.1270e-1, -3.6543e-1], [ 3.1082e-1, -2.6737e-4, -1.2762e0, ..., 1.3319e-2, -3.2381e-1, 2.3815e-1]]] Tensor[[1, 9, 1024], f32] Took 116.840791ms ``` ## Similarities In this example, Bert is used to compute the sentence embeddings for a set of sentences (hardcoded in the examples). Then cosine similarities are computed for each sentence pair and they are reported by decreasing values, hence the first reported pair contains the two sentences that have the highest similarity score. The sentence embeddings are computed using average pooling through all the sentence tokens, including some potential padding. ```bash cargo run --example bert --release > score: 0.85 'The new movie is awesome' 'The new movie is so great' > score: 0.61 'The cat sits outside' 'The cat plays in the garden' > score: 0.52 'I love pasta' 'Do you like pizza?' > score: 0.23 'The new movie is awesome' 'Do you like pizza?' > score: 0.22 'I love pasta' 'The new movie is awesome' ```
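The pooling and similarity steps only amount to a few tensor operations. Below is a minimal sketch, assuming `f32` embeddings; the helper names are illustrative and are not the exact functions used in `main.rs`:

```rust
use candle::{Result, Tensor};

/// Mean-pool token embeddings of shape (1, n_tokens, hidden) into (1, hidden).
fn mean_pool(token_embeddings: &Tensor) -> Result<Tensor> {
    let (_batch, n_tokens, _hidden) = token_embeddings.dims3()?;
    token_embeddings.sum(1)? / (n_tokens as f64)
}

/// Cosine similarity between two embedding tensors of the same shape.
fn cosine_similarity(a: &Tensor, b: &Tensor) -> Result<f32> {
    let dot = (a * b)?.sum_all()?.to_scalar::<f32>()?;
    let norm_a = a.sqr()?.sum_all()?.to_scalar::<f32>()?;
    let norm_b = b.sqr()?.sum_all()?.to_scalar::<f32>()?;
    Ok(dot / (norm_a.sqrt() * norm_b.sqrt()))
}
```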
candle/candle-examples/examples/bert/README.md/0
{ "file_path": "candle/candle-examples/examples/bert/README.md", "repo_id": "candle", "token_count": 1564 }
21
# candle-falcon

Falcon is a general-purpose large language model developed by the Technology Innovation Institute (TII).
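An example invocation is sketched below, assuming the usual text-generation flags shared by the candle examples (check this example's `main.rs` for the exact arguments supported):

```bash
cargo run --example falcon --release -- --prompt "Here is a test sentence"
```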
candle/candle-examples/examples/falcon/README.md/0
{ "file_path": "candle/candle-examples/examples/falcon/README.md", "repo_id": "candle", "token_count": 17 }
22
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::mistral::{Config, Model as Mistral}; use candle_transformers::models::quantized_mistral::Model as QMistral; use candle::{DType, Device, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; enum Model { Mistral(Mistral), Quantized(QMistral), } struct TextGeneration { model: Model, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); for &t in tokens.iter() { if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("</s>") { Some(token) => token, None => anyhow::bail!("cannot find the </s> token"), }; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let context_size = if index > 0 { 1 } else { tokens.len() }; let start_pos = tokens.len().saturating_sub(context_size); let ctxt = &tokens[start_pos..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = match &mut self.model { Model::Mistral(m) => m.forward(&input, start_pos)?, Model::Quantized(m) => m.forward(&input, start_pos)?, }; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] use_flash_attn: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. 
#[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 100)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "main")] revision: String, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] quantized: bool, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id, None => { if args.quantized { "lmz/candle-mistral".to_string() } else { "mistralai/Mistral-7B-v0.1".to_string() } } }; let repo = api.repo(Repo::with_revision( model_id, RepoType::Model, args.revision, )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("tokenizer.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { if args.quantized { vec![repo.get("model-q4k.gguf")?] } else { candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")? } } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config = Config::config_7b_v0_1(args.use_flash_attn); let device = candle_examples::device(args.cpu)?; let (model, device) = if args.quantized { let filename = &filenames[0]; let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(filename, &device)?; let model = QMistral::new(&config, vb)?; (Model::Quantized(model), device) } else { let dtype = if device.is_cuda() { DType::BF16 } else { DType::F32 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let model = Mistral::new(&config, vb)?; (Model::Mistral(model), device) }; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/mistral/main.rs/0
{ "file_path": "candle/candle-examples/examples/mistral/main.rs", "repo_id": "candle", "token_count": 4018 }
23
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::io::Write; use std::path::PathBuf; use candle_transformers::models::quantized_t5 as t5; use anyhow::{Error as E, Result}; use candle::{Device, Tensor}; use candle_transformers::generation::LogitsProcessor; use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::Api, api::sync::ApiRepo, Repo, RepoType}; use tokenizers::Tokenizer; #[derive(Clone, Debug, Copy, ValueEnum)] enum Which { T5Small, FlanT5Small, FlanT5Base, FlanT5Large, FlanT5Xl, FlanT5Xxl, } #[derive(Parser, Debug, Clone)] #[command(author, version, about, long_about = None)] struct Args { /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// The model repository to use on the HuggingFace hub. #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] weight_file: Option<String>, #[arg(long)] config_file: Option<String>, // Enable/disable decoding. #[arg(long, default_value = "false")] disable_cache: bool, /// Use this prompt, otherwise compute sentence similarities. #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long, default_value_t = 0.8)] temperature: f64, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, /// The model size to use. #[arg(long, default_value = "t5-small")] which: Which, } struct T5ModelBuilder { device: Device, config: t5::Config, weights_filename: PathBuf, } impl T5ModelBuilder { pub fn load(args: &Args) -> Result<(Self, Tokenizer)> { let device = Device::Cpu; let default_model = "lmz/candle-quantized-t5".to_string(); let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, "main".to_string()), }; let repo = Repo::with_revision(model_id, RepoType::Model, revision); let api = Api::new()?; let api = api.repo(repo); let config_filename = match &args.config_file { Some(filename) => Self::get_local_or_remote_file(filename, &api)?, None => match args.which { Which::T5Small => api.get("config.json")?, Which::FlanT5Small => api.get("config-flan-t5-small.json")?, Which::FlanT5Base => api.get("config-flan-t5-base.json")?, Which::FlanT5Large => api.get("config-flan-t5-large.json")?, Which::FlanT5Xl => api.get("config-flan-t5-xl.json")?, Which::FlanT5Xxl => api.get("config-flan-t5-xxl.json")?, }, }; let tokenizer_filename = api.get("tokenizer.json")?; let weights_filename = match &args.weight_file { Some(filename) => Self::get_local_or_remote_file(filename, &api)?, None => match args.which { Which::T5Small => api.get("model.gguf")?, Which::FlanT5Small => api.get("model-flan-t5-small.gguf")?, Which::FlanT5Base => api.get("model-flan-t5-base.gguf")?, Which::FlanT5Large => api.get("model-flan-t5-large.gguf")?, Which::FlanT5Xl => api.get("model-flan-t5-xl.gguf")?, Which::FlanT5Xxl => api.get("model-flan-t5-xxl.gguf")?, }, }; let config = std::fs::read_to_string(config_filename)?; let mut config: t5::Config = serde_json::from_str(&config)?; config.use_cache = !args.disable_cache; let tokenizer = 
Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; Ok(( Self { device, config, weights_filename, }, tokenizer, )) } pub fn build_model(&self) -> Result<t5::T5ForConditionalGeneration> { let device = Device::Cpu; let vb = t5::VarBuilder::from_gguf(&self.weights_filename, &device)?; Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?) } fn get_local_or_remote_file(filename: &str, api: &ApiRepo) -> Result<PathBuf> { let local_filename = std::path::PathBuf::from(filename); if local_filename.exists() { Ok(local_filename) } else { Ok(api.get(filename)?) } } } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?; let device = &builder.device; let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let tokens = tokenizer .encode(args.prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let mut model = builder.build_model()?; let mut output_token_ids = [builder .config .decoder_start_token_id .unwrap_or(builder.config.pad_token_id) as u32] .to_vec(); let temperature = if args.temperature <= 0. { None } else { Some(args.temperature) }; let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p); let encoder_output = model.encode(&input_token_ids)?; let start = std::time::Instant::now(); for index in 0.. { if output_token_ids.len() > 512 { break; } let decoder_token_ids = if index == 0 || !builder.config.use_cache { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if args.repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, args.repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == builder.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); print!("{text}"); std::io::stdout().flush()?; } } let dt = start.elapsed(); println!( "\n{} tokens generated ({:.2} token/s)\n", output_token_ids.len(), output_token_ids.len() as f64 / dt.as_secs_f64(), ); Ok(()) }
candle/candle-examples/examples/quantized-t5/main.rs/0
{ "file_path": "candle/candle-examples/examples/quantized-t5/main.rs", "repo_id": "candle", "token_count": 3631 }
24
# This script exports pre-trained model weights in the safetensors format.
import torch
import torchvision
from safetensors import torch as stt

m = torchvision.models.resnet50(pretrained=True)
stt.save_file(m.state_dict(), 'resnet50.safetensors')
m = torchvision.models.resnet101(pretrained=True)
stt.save_file(m.state_dict(), 'resnet101.safetensors')
m = torchvision.models.resnet152(pretrained=True)
stt.save_file(m.state_dict(), 'resnet152.safetensors')
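# The exported .safetensors files can then be loaded on the Rust side with
# candle's `VarBuilder::from_mmaped_safetensors` (illustrative sketch; see the
# resnet example's main.rs for the actual loading code):
#
#   let vb = unsafe {
#       VarBuilder::from_mmaped_safetensors(&["resnet50.safetensors"], DType::F32, &device)?
#   };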
candle/candle-examples/examples/resnet/export_models.py/0
{ "file_path": "candle/candle-examples/examples/resnet/export_models.py", "repo_id": "candle", "token_count": 166 }
25
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::{Parser, ValueEnum}; use candle::{DType, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::models::trocr; use tokenizers::Tokenizer; mod image_processor; #[derive(Clone, Debug, Copy, ValueEnum)] enum Which { Base, Large, } #[derive(Parser, Debug)] struct Args { #[arg(long)] model: Option<String>, /// Choose the variant of the model to run. #[arg(long, default_value = "base")] which: Which, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Text to be translated #[arg(long)] image: String, } pub fn main() -> anyhow::Result<()> { use hf_hub::api::sync::Api; let args = Args::parse(); let tokenizer_dec = { let tokenizer = Api::new()? .model(String::from("ToluClassics/candle-trocr-tokenizer")) .get("tokenizer.json")?; Tokenizer::from_file(&tokenizer).map_err(E::msg)? }; let mut tokenizer_dec = TokenOutputStream::new(tokenizer_dec); let device = candle_examples::device(args.cpu)?; let vb = { let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => match args.which { Which::Base => Api::new()? .repo(hf_hub::Repo::with_revision( "microsoft/trocr-base-handwritten".to_string(), hf_hub::RepoType::Model, "refs/pr/3".to_string(), )) .get("model.safetensors")?, Which::Large => Api::new()? .repo(hf_hub::Repo::with_revision( "microsoft/trocr-large-handwritten".to_string(), hf_hub::RepoType::Model, "refs/pr/6".to_string(), )) .get("model.safetensors")?, }, }; println!("model: {:?}", model); unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? } }; let encoder_config = match args.which { Which::Base => candle_transformers::models::vit::Config::microsoft_trocr_base_handwritten(), Which::Large => { candle_transformers::models::vit::Config::microsoft_trocr_base_handwritten() } }; let decoder_config = trocr::TrOCRConfig::default(); let mut model = trocr::TrOCRModel::new(&encoder_config, &decoder_config, vb)?; let config = image_processor::ProcessorConfig::default(); let processor = image_processor::ViTImageProcessor::new(&config); let image = vec![args.image.as_str()]; let image = processor.preprocess(image)?; let encoder_xs = model.encoder().forward(&image)?; let mut logits_processor = candle_transformers::generation::LogitsProcessor::new(1337, None, None); let mut token_ids: Vec<u32> = vec![decoder_config.decoder_start_token_id]; for index in 0..1000 { let context_size = if index >= 1 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = model.decode(&input_ids, &encoder_xs, start_pos)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; token_ids.push(token); if let Some(t) = tokenizer_dec.next_token(token)? { use std::io::Write; print!("{t}"); std::io::stdout().flush()?; } if token == decoder_config.eos_token_id { break; } } if let Some(rest) = tokenizer_dec.decode_rest().map_err(E::msg)? { print!("{rest}"); } println!(); Ok(()) }
candle/candle-examples/examples/trocr/main.rs/0
{ "file_path": "candle/candle-examples/examples/trocr/main.rs", "repo_id": "candle", "token_count": 1880 }
26
use candle::{DType, Device, IndexOp, Result, Tensor}; use candle_nn::{batch_norm, conv2d, conv2d_no_bias, Func, Module, VarBuilder}; use std::collections::BTreeMap; use std::fs::File; use std::io::{BufRead, BufReader}; use std::path::Path; #[derive(Debug)] struct Block { block_type: String, parameters: BTreeMap<String, String>, } impl Block { fn get(&self, key: &str) -> Result<&str> { match self.parameters.get(&key.to_string()) { None => candle::bail!("cannot find {} in {}", key, self.block_type), Some(value) => Ok(value), } } } #[derive(Debug)] pub struct Darknet { blocks: Vec<Block>, parameters: BTreeMap<String, String>, } impl Darknet { fn get(&self, key: &str) -> Result<&str> { match self.parameters.get(&key.to_string()) { None => candle::bail!("cannot find {} in net parameters", key), Some(value) => Ok(value), } } } struct Accumulator { block_type: Option<String>, parameters: BTreeMap<String, String>, net: Darknet, } impl Accumulator { fn new() -> Accumulator { Accumulator { block_type: None, parameters: BTreeMap::new(), net: Darknet { blocks: vec![], parameters: BTreeMap::new(), }, } } fn finish_block(&mut self) { match &self.block_type { None => (), Some(block_type) => { if block_type == "net" { self.net.parameters = self.parameters.clone(); } else { let block = Block { block_type: block_type.to_string(), parameters: self.parameters.clone(), }; self.net.blocks.push(block); } self.parameters.clear(); } } self.block_type = None; } } pub fn parse_config<T: AsRef<Path>>(path: T) -> Result<Darknet> { let file = File::open(path.as_ref())?; let mut acc = Accumulator::new(); for line in BufReader::new(file).lines() { let line = line?; if line.is_empty() || line.starts_with('#') { continue; } let line = line.trim(); if line.starts_with('[') { if !line.ends_with(']') { candle::bail!("line does not end with ']' {line}") } let line = &line[1..line.len() - 1]; acc.finish_block(); acc.block_type = Some(line.to_string()); } else { let key_value: Vec<&str> = line.splitn(2, '=').collect(); if key_value.len() != 2 { candle::bail!("missing equal {line}") } let prev = acc.parameters.insert( key_value[0].trim().to_owned(), key_value[1].trim().to_owned(), ); if prev.is_some() { candle::bail!("multiple value for key {}", line) } } } acc.finish_block(); Ok(acc.net) } enum Bl { Layer(Box<dyn candle_nn::Module + Send + Sync>), Route(Vec<usize>), Shortcut(usize), Yolo(usize, Vec<(usize, usize)>), } fn conv(vb: VarBuilder, index: usize, p: usize, b: &Block) -> Result<(usize, Bl)> { let activation = b.get("activation")?; let filters = b.get("filters")?.parse::<usize>()?; let pad = b.get("pad")?.parse::<usize>()?; let size = b.get("size")?.parse::<usize>()?; let stride = b.get("stride")?.parse::<usize>()?; let padding = if pad != 0 { (size - 1) / 2 } else { 0 }; let (bn, bias) = match b.parameters.get("batch_normalize") { Some(p) if p.parse::<usize>()? != 0 => { let bn = batch_norm(filters, 1e-5, vb.pp(&format!("batch_norm_{index}")))?; (Some(bn), false) } Some(_) | None => (None, true), }; let conv_cfg = candle_nn::Conv2dConfig { stride, padding, groups: 1, dilation: 1, }; let conv = if bias { conv2d(p, filters, size, conv_cfg, vb.pp(&format!("conv_{index}")))? } else { conv2d_no_bias(p, filters, size, conv_cfg, vb.pp(&format!("conv_{index}")))? 
}; let leaky = match activation { "leaky" => true, "linear" => false, otherwise => candle::bail!("unsupported activation {}", otherwise), }; let func = candle_nn::func(move |xs| { let xs = conv.forward(xs)?; let xs = match &bn { Some(bn) => xs.apply_t(bn, false)?, None => xs, }; let xs = if leaky { xs.maximum(&(&xs * 0.1)?)? } else { xs }; Ok(xs) }); Ok((filters, Bl::Layer(Box::new(func)))) } fn upsample(prev_channels: usize) -> Result<(usize, Bl)> { let layer = candle_nn::func(|xs| { let (_n, _c, h, w) = xs.dims4()?; xs.upsample_nearest2d(2 * h, 2 * w) }); Ok((prev_channels, Bl::Layer(Box::new(layer)))) } fn int_list_of_string(s: &str) -> Result<Vec<i64>> { let res: std::result::Result<Vec<_>, _> = s.split(',').map(|xs| xs.trim().parse::<i64>()).collect(); Ok(res?) } fn usize_of_index(index: usize, i: i64) -> usize { if i >= 0 { i as usize } else { (index as i64 + i) as usize } } fn route(index: usize, p: &[(usize, Bl)], block: &Block) -> Result<(usize, Bl)> { let layers = int_list_of_string(block.get("layers")?)?; let layers: Vec<usize> = layers .into_iter() .map(|l| usize_of_index(index, l)) .collect(); let channels = layers.iter().map(|&l| p[l].0).sum(); Ok((channels, Bl::Route(layers))) } fn shortcut(index: usize, p: usize, block: &Block) -> Result<(usize, Bl)> { let from = block.get("from")?.parse::<i64>()?; Ok((p, Bl::Shortcut(usize_of_index(index, from)))) } fn yolo(p: usize, block: &Block) -> Result<(usize, Bl)> { let classes = block.get("classes")?.parse::<usize>()?; let flat = int_list_of_string(block.get("anchors")?)?; if flat.len() % 2 != 0 { candle::bail!("even number of anchors"); } let flat = flat.into_iter().map(|i| i as usize).collect::<Vec<_>>(); let anchors: Vec<_> = (0..(flat.len() / 2)) .map(|i| (flat[2 * i], flat[2 * i + 1])) .collect(); let mask = int_list_of_string(block.get("mask")?)?; let anchors = mask.into_iter().map(|i| anchors[i as usize]).collect(); Ok((p, Bl::Yolo(classes, anchors))) } fn detect( xs: &Tensor, image_height: usize, classes: usize, anchors: &Vec<(usize, usize)>, ) -> Result<Tensor> { let (bsize, _channels, height, _width) = xs.dims4()?; let stride = image_height / height; let grid_size = image_height / stride; let bbox_attrs = 5 + classes; let nanchors = anchors.len(); let xs = xs .reshape((bsize, bbox_attrs * nanchors, grid_size * grid_size))? .transpose(1, 2)? .contiguous()? .reshape((bsize, grid_size * grid_size * nanchors, bbox_attrs))?; let grid = Tensor::arange(0u32, grid_size as u32, &Device::Cpu)?; let a = grid.repeat((grid_size, 1))?; let b = a.t()?.contiguous()?; let x_offset = a.flatten_all()?.unsqueeze(1)?; let y_offset = b.flatten_all()?.unsqueeze(1)?; let xy_offset = Tensor::cat(&[&x_offset, &y_offset], 1)? .repeat((1, nanchors))? .reshape((grid_size * grid_size * nanchors, 2))? .unsqueeze(0)? .to_dtype(DType::F32)?; let anchors: Vec<f32> = anchors .iter() .flat_map(|&(x, y)| vec![x as f32 / stride as f32, y as f32 / stride as f32].into_iter()) .collect(); let anchors = Tensor::new(anchors.as_slice(), &Device::Cpu)? .reshape((anchors.len() / 2, 2))? .repeat((grid_size * grid_size, 1))? .unsqueeze(0)?; let ys02 = xs.i((.., .., 0..2))?; let ys24 = xs.i((.., .., 2..4))?; let ys4 = xs.i((.., .., 4..))?; let ys02 = (candle_nn::ops::sigmoid(&ys02)?.add(&xy_offset)? * stride as f64)?; let ys24 = (ys24.exp()?.mul(&anchors)? 
* stride as f64)?; let ys4 = candle_nn::ops::sigmoid(&ys4)?; let ys = Tensor::cat(&[ys02, ys24, ys4], 2)?; Ok(ys) } impl Darknet { pub fn height(&self) -> Result<usize> { let image_height = self.get("height")?.parse::<usize>()?; Ok(image_height) } pub fn width(&self) -> Result<usize> { let image_width = self.get("width")?.parse::<usize>()?; Ok(image_width) } pub fn build_model(&self, vb: VarBuilder) -> Result<Func> { let mut blocks: Vec<(usize, Bl)> = vec![]; let mut prev_channels: usize = 3; for (index, block) in self.blocks.iter().enumerate() { let channels_and_bl = match block.block_type.as_str() { "convolutional" => conv(vb.pp(&index.to_string()), index, prev_channels, block)?, "upsample" => upsample(prev_channels)?, "shortcut" => shortcut(index, prev_channels, block)?, "route" => route(index, &blocks, block)?, "yolo" => yolo(prev_channels, block)?, otherwise => candle::bail!("unsupported block type {}", otherwise), }; prev_channels = channels_and_bl.0; blocks.push(channels_and_bl); } let image_height = self.height()?; let func = candle_nn::func(move |xs| { let mut prev_ys: Vec<Tensor> = vec![]; let mut detections: Vec<Tensor> = vec![]; for (_, b) in blocks.iter() { let ys = match b { Bl::Layer(l) => { let xs = prev_ys.last().unwrap_or(xs); l.forward(xs)? } Bl::Route(layers) => { let layers: Vec<_> = layers.iter().map(|&i| &prev_ys[i]).collect(); Tensor::cat(&layers, 1)? } Bl::Shortcut(from) => (prev_ys.last().unwrap() + prev_ys.get(*from).unwrap())?, Bl::Yolo(classes, anchors) => { let xs = prev_ys.last().unwrap_or(xs); detections.push(detect(xs, image_height, *classes, anchors)?); Tensor::new(&[0u32], &Device::Cpu)? } }; prev_ys.push(ys); } Tensor::cat(&detections, 1) }); Ok(func) } }
candle/candle-examples/examples/yolo-v3/darknet.rs/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/darknet.rs", "repo_id": "candle", "token_count": 5405 }
27
# candle-flash-attn
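Flash attention kernels for candle. The op runs on CUDA only and expects half-precision inputs.

A minimal usage sketch, modeled on `tests/flash_attn_tests.rs`; the `(batch, seq_len, num_heads, head_dim)` layout is inferred from the transposes used in those tests:

```rust
use anyhow::Result;
use candle::{DType, Device, Tensor};

fn main() -> Result<()> {
    let device = Device::new_cuda(0)?;
    // (batch, seq_len, num_heads, head_dim), converted to f16 for the kernel.
    let q = Tensor::randn(0f32, 1f32, (1, 32, 8, 64), &device)?.to_dtype(DType::F16)?;
    let k = q.clone();
    let v = q.clone();
    let softmax_scale = 1f32 / (64f32).sqrt();
    let out = candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, /* causal */ false)?;
    println!("{:?}", out.dims());
    Ok(())
}
```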
candle/candle-flash-attn/README.md/0
{ "file_path": "candle/candle-flash-attn/README.md", "repo_id": "candle", "token_count": 8 }
28
use anyhow::Result; use candle::{DType, Device, IndexOp, Tensor, D}; fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> { let b = 10f32.powi(digits); let t = t.to_vec3::<f32>()?; let t = t .iter() .map(|t| { t.iter() .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) .collect() }) .collect(); Ok(t) } fn fa_acausal(q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32) -> Result<Tensor> { let in_dtype = q.dtype(); let q = q.to_dtype(DType::F32)?; let k = k.to_dtype(DType::F32)?; let v = v.to_dtype(DType::F32)?; let att = (q.matmul(&k.t()?)? * softmax_scale as f64)?; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let output = att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?; Ok(output) } #[test] fn flash_attn_acausal() -> Result<()> { let device = Device::new_cuda(0)?; let q = Tensor::arange(0u32, 48, &device)? .to_dtype(DType::F16)? .reshape((1, 3, 2, 8))?; let k = (&q / 40.)?; let v = (&q / 50.)?; let q = (&q / 30.)?; let ys1 = fa_acausal(&q, &k, &v, 0.5)?; let ys1 = ys1.i(0)?.to_dtype(DType::F32)?; let ys2 = { let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; candle_flash_attn::flash_attn(&q, &k, &v, 0.5, false)?.transpose(1, 2)? }; let ys2 = ys2.i(0)?.to_dtype(DType::F32)?; let diff = ys1.sub(&ys2)?.abs()?.flatten_all()?.max(0)?; assert_eq!(ys1.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys1, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); assert_eq!(ys2.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys2, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); assert!(diff.to_vec0::<f32>()?.abs() < 1e-5); Ok(()) } #[test] fn flash_attn_varlen() -> Result<()> { let device = Device::new_cuda(0)?; let q = Tensor::arange(0u32, 48, &device)? .to_dtype(DType::F16)? .reshape((3, 2, 8))?; let k = (&q / 40.)?; let v = (&q / 50.)?; let q = (&q / 30.)?; let seqlens_q = Tensor::new(&[0u32, 2u32], &device)?; let seqlens_k = Tensor::new(&[0u32, 2u32], &device)?; let ys = { let q = q.transpose(0, 1)?; let k = k.transpose(0, 1)?; let v = v.transpose(0, 1)?; candle_flash_attn::flash_attn_varlen( &q, &k, &v, &seqlens_q, &seqlens_k, 32, 32, 0.5, false, )? .transpose(0, 1)? }; let ys = ys.to_dtype(DType::F32)?; assert_eq!(ys.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); Ok(()) }
candle/candle-flash-attn/tests/flash_attn_tests.rs/0
{ "file_path": "candle/candle-flash-attn/tests/flash_attn_tests.rs", "repo_id": "candle", "token_count": 2787 }
29
#define _USE_MATH_DEFINES #include<math.h> #include<stdint.h> #include "cuda_utils.cuh" #define UNARY_OP(TYPENAME, FN_NAME, FUNC) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const TYPENAME *inp, \ TYPENAME *out \ ) { \ const size_t *dims = info; \ const size_t *strides = info + num_dims; \ if (is_contiguous(num_dims, dims, strides)) { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ TYPENAME x = inp ? inp[i] : out[i]; \ out[i] = FUNC; \ } \ } \ else { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \ TYPENAME x = inp ? inp[strided_i] : out[i]; \ out[i] = FUNC; \ } \ } \ } \ template<typename T> __device__ __forceinline__ T gelu_erf_fwd(T x) { return x * normcdfg(x); } template<typename T> __device__ __forceinline__ T gelu_fwd(T x) { T x_sq = x * x; T x_cube = x_sq * x; T alpha = x + static_cast<T>(0.044715) * x_cube; return static_cast<T>(0.5) * x * (static_cast<T>(1.0) + tanhg(static_cast<T>(M_2_SQRTPI * M_SQRT1_2) * alpha)); } template<typename T> __device__ __forceinline__ T elu_fwd(T x, T alpha) { if (x > static_cast<T>(0)) { return x; } return alpha * (expg(x) - static_cast<T>(1)); } template<typename T> __device__ __forceinline__ T relu_fwd(T x) { T zero = 0.; return maxg(x, zero); } #define UNARY_OP1(TYPENAME, FN_NAME, FUNC) \ extern "C" __global__ void FN_NAME( \ const size_t numel, \ const size_t num_dims, \ const size_t *info, \ const TYPENAME param, \ const TYPENAME *inp, \ TYPENAME *out \ ) { \ const size_t *dims = info; \ const size_t *strides = info + num_dims; \ if (is_contiguous(num_dims, dims, strides)) { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ TYPENAME x = inp ? inp[i] : out[i]; \ out[i] = FUNC; \ } \ } \ else { \ for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) { \ unsigned strided_i = get_strided_index(i, num_dims, dims, strides); \ TYPENAME x = inp ? 
inp[strided_i] : out[i]; \ out[i] = FUNC; \ } \ } \ } \ #if __CUDA_ARCH__ >= 800 UNARY_OP(__nv_bfloat16, ucopy_bf16, x) UNARY_OP(__nv_bfloat16, uneg_bf16, -x) UNARY_OP(__nv_bfloat16, urecip_bf16, recipg(x)) UNARY_OP(__nv_bfloat16, uexp_bf16, expg(x)) UNARY_OP(__nv_bfloat16, ulog_bf16, logg(x)) UNARY_OP(__nv_bfloat16, usin_bf16, sing(x)) UNARY_OP(__nv_bfloat16, ucos_bf16, cosg(x)) UNARY_OP(__nv_bfloat16, utanh_bf16, tanhg(x)) UNARY_OP(__nv_bfloat16, uerf_bf16, erfg(x)) UNARY_OP(__nv_bfloat16, uceil_bf16, ceilg(x)) UNARY_OP(__nv_bfloat16, ufloor_bf16, floorg(x)) UNARY_OP(__nv_bfloat16, uround_bf16, roundg(x)) UNARY_OP(__nv_bfloat16, unormcdf_bf16, normcdfg(x)) UNARY_OP(__nv_bfloat16, uabs_bf16, absg(x)) UNARY_OP(__nv_bfloat16, usqr_bf16, x*x) UNARY_OP(__nv_bfloat16, usqrt_bf16, sqrtg(x)) UNARY_OP(__nv_bfloat16, ugelu_bf16, gelu_fwd(x)) UNARY_OP(__nv_bfloat16, ugelu_erf_bf16, gelu_erf_fwd(x)) UNARY_OP(__nv_bfloat16, urelu_bf16, relu_fwd(x)) UNARY_OP1(__nv_bfloat16, uelu_bf16, elu_fwd(x, param)) UNARY_OP1(__nv_bfloat16, upowf_bf16, powg(x, param)) #endif #if __CUDA_ARCH__ >= 530 UNARY_OP(__half, ucopy_f16, x) UNARY_OP(__half, uneg_f16, -x) UNARY_OP(__half, urecip_f16, recipg(x)) UNARY_OP(__half, uexp_f16, expg(x)) UNARY_OP(__half, ulog_f16, logg(x)) UNARY_OP(__half, usin_f16, sing(x)) UNARY_OP(__half, ucos_f16, cosg(x)) UNARY_OP(__half, utanh_f16, tanhg(x)) UNARY_OP(__half, uerf_f16, erfg(x)) UNARY_OP(__half, uceil_f16, ceilg(x)) UNARY_OP(__half, ufloor_f16, floorg(x)) UNARY_OP(__half, uround_f16, roundg(x)) UNARY_OP(__half, unormcdf_f16, normcdfg(x)) UNARY_OP(__half, uabs_f16, absg(x)) UNARY_OP(__half, usqr_f16, x*x) UNARY_OP(__half, usqrt_f16, sqrtg(x)) UNARY_OP(__half, ugelu_f16, gelu_fwd(x)) UNARY_OP(__half, ugelu_erf_f16, gelu_erf_fwd(x)) UNARY_OP(__half, urelu_f16, relu_fwd(x)) UNARY_OP1(__half, uelu_f16, elu_fwd(x, param)) UNARY_OP1(__half, upowf_f16, powg(x, param)) #endif UNARY_OP(uint8_t, ucopy_u8, x) UNARY_OP(uint32_t, ucopy_u32, x) UNARY_OP(int64_t, ucopy_i64, x) UNARY_OP(float, ucopy_f32, x) UNARY_OP(double, ucopy_f64, x) UNARY_OP(float, uneg_f32, -x) UNARY_OP(double, uneg_f64, -x) UNARY_OP(float, urecip_f32, recipg(x)) UNARY_OP(double, urecip_f64, recipg(x)) UNARY_OP(float, uexp_f32, expg(x)) UNARY_OP(double, uexp_f64, expg(x)) UNARY_OP(float, ulog_f32, logg(x)) UNARY_OP(double, ulog_f64, logg(x)) UNARY_OP(float, usin_f32, sing(x)) UNARY_OP(double, usin_f64, sing(x)) UNARY_OP(float, ucos_f32, cosg(x)) UNARY_OP(double, ucos_f64, cosg(x)) UNARY_OP(float, utanh_f32, tanhg(x)) UNARY_OP(double, utanh_f64, tanhg(x)) UNARY_OP(float, uerf_f32, erfg(x)) UNARY_OP(double, uerf_f64, erfg(x)) UNARY_OP(float, uceil_f32, ceilg(x)) UNARY_OP(double, uceil_f64, ceilg(x)) UNARY_OP(float, ufloor_f32, floorg(x)) UNARY_OP(double, ufloor_f64, floorg(x)) UNARY_OP(float, uround_f32, roundg(x)) UNARY_OP(double, uround_f64, roundg(x)) UNARY_OP(float, unormcdf_f32, normcdfg(x)) UNARY_OP(double, unormcdf_f64, normcdfg(x)) UNARY_OP(float, uabs_f32, absg(x)) UNARY_OP(double, uabs_f64, absg(x)) UNARY_OP(float, usqr_f32, x*x) UNARY_OP(double, usqr_f64, x*x) UNARY_OP(float, usqrt_f32, sqrtg(x)) UNARY_OP(double, usqrt_f64, sqrtg(x)) UNARY_OP(float, ugelu_f32, gelu_fwd(x)) UNARY_OP(double, ugelu_f64, gelu_fwd(x)) UNARY_OP(float, ugelu_erf_f32, gelu_erf_fwd(x)) UNARY_OP(double, ugelu_erf_f64, gelu_erf_fwd(x)) UNARY_OP(float, urelu_f32, relu_fwd(x)) UNARY_OP(double, urelu_f64, relu_fwd(x)) UNARY_OP1(float, uelu_f32, elu_fwd(x, param)) UNARY_OP1(double, uelu_f64, elu_fwd(x, param)) UNARY_OP1(float, upowf_f32, powg(x, 
param)) UNARY_OP1(double, upowf_f64, powg(x, param))
candle/candle-kernels/src/unary.cu/0
{ "file_path": "candle/candle-kernels/src/unary.cu", "repo_id": "candle", "token_count": 3226 }
30
use candle_metal_kernels::{call_affine, Kernels}; use metal::objc::rc::autoreleasepool; use metal::{Device, MTLResourceOptions}; use rand; use std::any::type_name; use std::time::Instant; fn main() { let device = Device::system_default().unwrap(); let kernels = Kernels::new(); let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>(); let f32_10k = (0..10000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); let f32_100k = (0..100000) .map(|_| rand::random::<f32>()) .collect::<Vec<_>>(); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}", "dtype", "kernel", "size", "runs", "total time", "avg time" ); // f32 run_affine_bench(&device, &kernels, &f32_1k); run_affine_bench(&device, &kernels, &f32_10k); run_affine_bench(&device, &kernels, &f32_100k); } fn run_affine_bench<T: Clone>(device: &Device, kernels: &Kernels, v: &[T]) { let command_queue = device.new_command_queue(); let options = MTLResourceOptions::StorageModeManaged; let iterations = 10000; let input = device.new_buffer_with_data( v.as_ptr() as *const core::ffi::c_void, core::mem::size_of_val(v) as u64, options, ); let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options); let mul: f32 = 1.2345; let add: f32 = 2.3456; let total_time = autoreleasepool(|| { let command_buffer = command_queue.new_command_buffer(); let start = Instant::now(); for _ in 0..iterations { call_affine( &device, command_buffer, &kernels, "affine_float", v.len(), &input, &mut output, mul, add, ) .unwrap(); } command_buffer.commit(); command_buffer.wait_until_completed(); start.elapsed() }); println!( "{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}", type_name::<T>().split("::").last().unwrap(), "affine", v.len(), iterations, total_time, total_time / iterations ); }
candle/candle-metal-kernels/tmp/affine.rs/0
{ "file_path": "candle/candle-metal-kernels/tmp/affine.rs", "repo_id": "candle", "token_count": 1154 }
31
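The benchmark above repeatedly launches the Metal affine kernel, which computes y = mul * x + add element-wise. The same transform is exposed at the tensor level; a small sketch (CPU backend assumed, constants copied from the benchmark):

use candle::{Device, Result, Tensor};

fn affine_demo() -> Result<()> {
    let x = Tensor::new(&[0f32, 1., 2., 3.], &Device::Cpu)?;
    let y = x.affine(1.2345, 2.3456)?; // y[i] = 1.2345 * x[i] + 2.3456
    assert_eq!(y.dims(), &[4]);
    Ok(())
}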
//! Layer Normalization. //! //! This layer applies Layer Normalization over a mini-batch of inputs as described in [`Layer //! Normalization`]. The input is expected to have three dimensions: a batch dimension, a length, //! and a hidden size, the normalization is applied over the last dimension. //! //! # Example //! //! ```rust //! use candle::{Tensor, Device::Cpu, test_utils::to_vec3_round}; //! use candle_nn::{LayerNorm, Module}; //! # fn main() -> candle::Result<()> { //! //! let w = Tensor::new(1f32, &Cpu)?; //! let b = Tensor::new(0f32, &Cpu)?; //! let layer = LayerNorm::new(w, b, 1e-5); //! //! let xs = Tensor::new( //! &[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]], //! &Cpu)?; //! let ys = layer.forward(&xs)?; //! assert_eq!( //! to_vec3_round(&ys, 4)?, //! &[[[-1.2247, 0.0, 1.2247], //! [-1.2247, 0.0, 1.2247], //! [ 1.2247, 0.0, -1.2247]]]); //! # Ok(()) } //! ``` //! //! [`Layer Normalization`]: https://arxiv.org/abs/1607.06450 use candle::{DType, Result, Tensor, D}; #[derive(Debug, Clone, Copy, PartialEq)] pub struct LayerNormConfig { pub eps: f64, /// Whether to remove the mean or not, the default is true and when set to false, this turns /// this layer into RmsNorm. pub remove_mean: bool, pub affine: bool, } impl Default for LayerNormConfig { fn default() -> Self { Self { eps: 1e-5, remove_mean: true, affine: true, } } } impl From<f64> for LayerNormConfig { fn from(eps: f64) -> Self { Self { eps, remove_mean: true, affine: true, } } } // This layer norm version handles both weight and bias so removes the mean. #[derive(Clone, Debug)] pub struct LayerNorm { weight: Tensor, bias: Option<Tensor>, remove_mean: bool, eps: f64, } impl LayerNorm { pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self { Self { weight, bias: Some(bias), remove_mean: true, eps, } } pub fn new_no_bias(weight: Tensor, eps: f64) -> Self { Self { weight, bias: None, remove_mean: true, eps, } } pub fn rms_norm(weight: Tensor, eps: f64) -> Self { Self { weight, bias: None, remove_mean: false, eps, } } pub fn weight(&self) -> &Tensor { &self.weight } pub fn bias(&self) -> Option<&Tensor> { self.bias.as_ref() } } impl crate::Module for LayerNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let x = if self.remove_mean { let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?; x.broadcast_sub(&mean_x)? } else { x }; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?; let x = x_normed.to_dtype(x_dtype)?.broadcast_mul(&self.weight)?; match &self.bias { None => Ok(x), Some(bias) => x.broadcast_add(bias), } } } pub fn layer_norm<C: Into<LayerNormConfig>>( size: usize, config: C, vb: crate::VarBuilder, ) -> Result<LayerNorm> { let config = config.into(); let weight = vb.get_with_hints(size, "weight", crate::Init::Const(1.))?; let bias = if config.affine { Some(vb.get_with_hints(size, "bias", crate::Init::Const(0.))?) } else { None }; Ok(LayerNorm { weight, bias, remove_mean: config.remove_mean, eps: config.eps, }) } /// RmsNorm is a specialized version of the LayerNorm module. 
#[derive(Clone, Debug)] pub struct RmsNorm(LayerNorm); impl RmsNorm { pub fn new(weight: Tensor, eps: f64) -> Self { Self(LayerNorm::rms_norm(weight, eps)) } pub fn into_inner(self) -> LayerNorm { self.0 } } impl crate::Module for RmsNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.0.forward(xs) } } pub fn rms_norm(size: usize, eps: f64, vb: crate::VarBuilder) -> Result<RmsNorm> { let config = LayerNormConfig { eps, remove_mean: false, affine: false, }; Ok(RmsNorm(layer_norm(size, config, vb)?)) }
candle/candle-nn/src/layer_norm.rs/0
{ "file_path": "candle/candle-nn/src/layer_norm.rs", "repo_id": "candle", "token_count": 2263 }
32
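The RmsNorm wrapper defined at the end of the file skips mean removal and the bias term, normalizing by the root mean square only. A usage sketch mirroring the LayerNorm doc example above (the weights and inputs are made up; unit weights leave the normalized values unscaled):

use candle::{DType, Device, Result, Tensor};
use candle_nn::Module;

fn rms_norm_demo() -> Result<()> {
    let w = Tensor::ones(3, DType::F32, &Device::Cpu)?;
    let rms = candle_nn::RmsNorm::new(w, 1e-5);
    let xs = Tensor::new(&[[[1f32, 2., 3.]]], &Device::Cpu)?;
    let ys = rms.forward(&xs)?; // normalized over the last (hidden) dimension
    assert_eq!(ys.dims3()?, (1, 1, 3));
    Ok(())
}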
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::test_utils::{to_vec0_round, to_vec2_round}; use anyhow::Result; use candle::{Device, Tensor, Var}; use candle_nn::{AdamW, Linear, Module, Optimizer, ParamsAdamW, SGD}; #[test] fn sgd_optim() -> Result<()> { let x = Var::new(0f32, &Device::Cpu)?; let mut sgd = SGD::new(vec![x.clone()], 0.1)?; let xt = x.as_tensor(); for _step in 0..100 { let loss = ((xt - 4.2)? * (xt - 4.2)?)?; sgd.backward_step(&loss)? } assert_eq!(x.to_scalar::<f32>()?, 4.199999); Ok(()) } /* The results of this test have been checked against the following PyTorch code. import torch from torch import optim w_gen = torch.tensor([[3., 1.]]) b_gen = torch.tensor([-2.]) sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]]) sample_ys = sample_xs.matmul(w_gen.t()) + b_gen m = torch.nn.Linear(2, 1) with torch.no_grad(): m.weight.zero_() m.bias.zero_() optimizer = optim.SGD(m.parameters(), lr=0.004, momentum=0.) for _step in range(1000): optimizer.zero_grad() ys = m(sample_xs) loss = ((ys - sample_ys)**2).sum() loss.backward() optimizer.step() print(m.weight) print(m.bias) */ #[test] fn sgd_linear_regression() -> Result<()> { // Generate some linear data, y = 3.x1 + x2 - 2. let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; // Now use backprop to run a linear regression between samples and get the coefficients back. let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?; let b = Var::new(0f32, &Device::Cpu)?; let mut sgd = SGD::new(vec![w.clone(), b.clone()], 0.004)?; let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone())); for _step in 0..1000 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; sgd.backward_step(&loss)?; } assert_eq!(w.to_vec2::<f32>()?, &[[2.9983196, 0.99790204]]); assert_eq!(b.to_scalar::<f32>()?, -1.9796902); Ok(()) } /* The following test returns the same values as the PyTorch code below. import torch from torch import optim w_gen = torch.tensor([[3., 1.]]) b_gen = torch.tensor([-2.]) sample_xs = torch.tensor([[2., 1.], [7., 4.], [-4., 12.], [5., 8.]]) sample_ys = sample_xs.matmul(w_gen.t()) + b_gen m = torch.nn.Linear(2, 1) with torch.no_grad(): m.weight.zero_() m.bias.zero_() optimizer = optim.AdamW(m.parameters(), lr=0.1) for _step in range(100): optimizer.zero_grad() ys = m(sample_xs) loss = ((ys - sample_ys)**2).sum() loss.backward() optimizer.step() print(m.weight) print(m.bias) */ #[test] fn adamw_linear_regression() -> Result<()> { let w_gen = Tensor::new(&[[3f32, 1.]], &Device::Cpu)?; let b_gen = Tensor::new(-2f32, &Device::Cpu)?; let gen = Linear::new(w_gen, Some(b_gen)); let sample_xs = Tensor::new(&[[2f32, 1.], [7., 4.], [-4., 12.], [5., 8.]], &Device::Cpu)?; let sample_ys = gen.forward(&sample_xs)?; // Now use backprop to run a linear regression between samples and get the coefficients back. 
let w = Var::new(&[[0f32, 0.]], &Device::Cpu)?; let b = Var::new(0f32, &Device::Cpu)?; let params = ParamsAdamW { lr: 0.1, ..Default::default() }; let mut opt = AdamW::new(vec![w.clone(), b.clone()], params)?; let lin = Linear::new(w.as_tensor().clone(), Some(b.as_tensor().clone())); for _step in 0..100 { let ys = lin.forward(&sample_xs)?; let loss = ys.sub(&sample_ys)?.sqr()?.sum_all()?; opt.backward_step(&loss)?; } assert_eq!(to_vec2_round(w.as_tensor(), 4)?, &[[2.7257, 0.7097]]); assert_eq!(to_vec0_round(b.as_tensor(), 4)?, 0.7873); Ok(()) }
candle/candle-nn/tests/optim.rs/0
{ "file_path": "candle/candle-nn/tests/optim.rs", "repo_id": "candle", "token_count": 1886 }
33
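Beyond the regression checks above, the optimizer API boils down to building an optimizer over a set of Vars and calling backward_step on a scalar loss. A minimal AdamW sketch reusing the same toy objective (x - 4.2)^2 as the SGD test:

use candle::{Device, Result, Var};
use candle_nn::{AdamW, Optimizer, ParamsAdamW};

fn adamw_demo() -> Result<()> {
    let x = Var::new(0f32, &Device::Cpu)?;
    let params = ParamsAdamW {
        lr: 0.1,
        ..Default::default()
    };
    let mut opt = AdamW::new(vec![x.clone()], params)?;
    for _step in 0..100 {
        let xt = x.as_tensor();
        let loss = ((xt - 4.2)? * (xt - 4.2)?)?;
        opt.backward_step(&loss)?;
    }
    Ok(())
}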
import logging

try:
    from .candle import *
except ImportError as e:
    # If we are in development mode, or we did not bundle the DLLs, we try to locate them here
    # PyO3 won't give us any information about what DLLs are missing, so we can only try to load
    # the DLLs and re-import the module
    logging.warning("DLLs were not bundled with this package. Trying to locate them...")
    import os
    import platform

    def locate_cuda_dlls():
        logging.warning("Locating CUDA DLLs...")
        # Try to locate CUDA_PATH environment variable
        cuda_path = os.environ.get("CUDA_PATH", None)
        if cuda_path:
            logging.warning(f"Found CUDA_PATH environment variable: {cuda_path}")
            if platform.system() == "Windows":
                cuda_path = os.path.join(cuda_path, "bin")
            else:
                cuda_path = os.path.join(cuda_path, "lib64")
            logging.warning(f"Adding {cuda_path} to DLL search path...")
            os.add_dll_directory(cuda_path)
        else:
            logging.warning("CUDA_PATH environment variable not found!")

    def locate_mkl_dlls():
        # Try to locate ONEAPI_ROOT environment variable
        oneapi_root = os.environ.get("ONEAPI_ROOT", None)
        if oneapi_root:
            if platform.system() == "Windows":
                mkl_path = os.path.join(
                    oneapi_root, "compiler", "latest", "windows", "redist", "intel64_win", "compiler"
                )
            else:
                mkl_path = os.path.join(oneapi_root, "mkl", "latest", "lib", "intel64")
            logging.warning(f"Adding {mkl_path} to DLL search path...")
            os.add_dll_directory(mkl_path)
        else:
            logging.warning("ONEAPI_ROOT environment variable not found!")

    locate_cuda_dlls()
    locate_mkl_dlls()

    try:
        from .candle import *
    except ImportError as inner_e:
        raise ImportError("Could not locate DLLs. Please check the documentation for more information.") from inner_e

__doc__ = candle.__doc__

if hasattr(candle, "__all__"):
    __all__ = candle.__all__
candle/candle-pyo3/py_src/candle/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/__init__.py", "repo_id": "candle", "token_count": 919 }
34

# Generated content DO NOT EDIT
from .. import utils

cuda_is_available = utils.cuda_is_available
get_num_threads = utils.get_num_threads
has_accelerate = utils.has_accelerate
has_mkl = utils.has_mkl
load_ggml = utils.load_ggml
load_gguf = utils.load_gguf
load_safetensors = utils.load_safetensors
save_gguf = utils.save_gguf
save_safetensors = utils.save_safetensors
candle/candle-pyo3/py_src/candle/utils/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/utils/__init__.py", "repo_id": "candle", "token_count": 150 }
35
import candle from candle import Tensor from candle.utils import cuda_is_available from candle.testing import assert_equal import pytest def test_tensor_can_be_constructed(): t = Tensor(42.0) assert t.values() == 42.0 def test_tensor_can_be_constructed_from_list(): t = Tensor([3.0, 1, 4, 1, 5, 9, 2, 6]) assert t.values() == [3.0, 1, 4, 1, 5, 9, 2, 6] def test_tensor_can_be_constructed_from_list_of_lists(): t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]]) assert t.values() == [[3.0, 1, 4, 1], [5, 9, 2, 6]] def test_tensor_can_be_quantized(): t = candle.randn((16, 256)) for format in [ "q4_0", "q4_1", "q5_0", "q5_1", "q8_0", "q2k", "q3k", "q4k", "q5k", "q8k", ]: for formatted_format in [format.upper(), format.lower()]: quant_t = t.quantize(formatted_format) assert quant_t.ggml_dtype.lower() == format.lower() assert quant_t.shape == t.shape def test_tensor_can_be_indexed(): t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]]) assert t[0].values() == [3.0, 1.0, 4.0, 1.0] assert t[1].values() == [5.0, 9.0, 2.0, 6.0] assert t[-1].values() == [5.0, 9.0, 2.0, 6.0] assert t[-2].values() == [3.0, 1.0, 4.0, 1.0] def test_tensor_can_be_sliced(): t = Tensor([3.0, 1, 4, 10, 5, 9, 2, 6]) assert t[0:4].values() == [3.0, 1.0, 4.0, 10.0] assert t[4:8].values() == [5.0, 9.0, 2.0, 6.0] assert t[-4:].values() == [5.0, 9.0, 2.0, 6.0] assert t[:-4].values() == [3.0, 1.0, 4.0, 10.0] assert t[-4:-2].values() == [5.0, 9.0] assert t[...].values() == t.values() def test_tensor_can_be_sliced_2d(): t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]]) assert t[:, 0].values() == [3.0, 5] assert t[:, 1].values() == [1.0, 9.0] assert t[0, 0].values() == 3.0 assert t[:, -1].values() == [1.0, 6.0] assert t[:, -4].values() == [3.0, 5] assert t[..., 0].values() == [3.0, 5] def test_tensor_can_be_scliced_3d(): t = Tensor([[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]) assert t[:, :, 0].values() == [[1, 5], [9, 13]] assert t[:, :, 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]] assert t[:, 0, 0].values() == [1, 9] assert t[..., 0].values() == [[1, 5], [9, 13]] assert t[..., 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]] def assert_bool(t: Tensor, expected: bool): assert t.shape == () assert str(t.dtype) == str(candle.u8) assert bool(t.values()) == expected def test_tensor_supports_equality_operations_with_scalars(): t = Tensor(42.0) assert_bool(t == 42.0, True) assert_bool(t == 43.0, False) assert_bool(t != 42.0, False) assert_bool(t != 43.0, True) assert_bool(t > 41.0, True) assert_bool(t > 42.0, False) assert_bool(t >= 41.0, True) assert_bool(t >= 42.0, True) assert_bool(t < 43.0, True) assert_bool(t < 42.0, False) assert_bool(t <= 43.0, True) assert_bool(t <= 42.0, True) def test_tensor_supports_equality_operations_with_tensors(): t = Tensor(42.0) same = Tensor(42.0) other = Tensor(43.0) assert_bool(t == same, True) assert_bool(t == other, False) assert_bool(t != same, False) assert_bool(t != other, True) assert_bool(t > same, False) assert_bool(t > other, False) assert_bool(t >= same, True) assert_bool(t >= other, False) assert_bool(t < same, False) assert_bool(t < other, True) assert_bool(t <= same, True) assert_bool(t <= other, True) def test_tensor_equality_operations_can_broadcast(): # Create a decoder attention mask as a test case # e.g. 
# [[1,0,0] # [1,1,0] # [1,1,1]] mask_cond = candle.Tensor([0, 1, 2]) mask = mask_cond < (mask_cond + 1).reshape((3, 1)) assert mask.shape == (3, 3) assert_equal(mask, Tensor([[1, 0, 0], [1, 1, 0], [1, 1, 1]]).to_dtype(candle.u8)) def test_tensor_can_be_hashed(): t = Tensor(42.0) other = Tensor(42.0) # Hash should represent a unique tensor assert hash(t) != hash(other) assert hash(t) == hash(t) def test_tensor_can_be_expanded_with_none(): t = candle.rand((12, 12)) b = t[None] assert b.shape == (1, 12, 12) c = t[:, None, None, :] assert c.shape == (12, 1, 1, 12) d = t[None, :, None, :] assert d.shape == (1, 12, 1, 12) e = t[None, None, :, :] assert e.shape == (1, 1, 12, 12) f = t[:, :, None] assert f.shape == (12, 12, 1) def test_tensor_can_be_index_via_tensor(): t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]]) indexed = t[candle.Tensor([0, 2])] assert indexed.shape == (2, 4) assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]] indexed = t[:, candle.Tensor([0, 2])] assert indexed.shape == (3, 2) assert indexed.values() == [[1, 1], [3, 3], [5, 5]] def test_tensor_can_be_index_via_list(): t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]]) indexed = t[[0, 2]] assert indexed.shape == (2, 4) assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]] indexed = t[:, [0, 2]] assert indexed.shape == (3, 2) assert indexed.values() == [[1, 1], [3, 3], [5, 5]] def test_tensor_can_be_cast_via_to(): t = Tensor(42.0) assert str(t.dtype) == str(candle.f32) t_new_args = t.to(candle.f64) assert str(t_new_args.dtype) == str(candle.f64) t_new_kwargs = t.to(dtype=candle.f64) assert str(t_new_kwargs.dtype) == str(candle.f64) pytest.raises(TypeError, lambda: t.to("not a dtype")) pytest.raises(TypeError, lambda: t.to(dtype="not a dtype")) pytest.raises(TypeError, lambda: t.to(candle.f64, "not a dtype")) pytest.raises(TypeError, lambda: t.to()) pytest.raises(ValueError, lambda: t.to(candle.f16, dtype=candle.f64)) pytest.raises(ValueError, lambda: t.to(candle.f16, candle.f16)) other = Tensor(42.0).to(candle.f64) t_new_other_args = t.to(other) assert str(t_new_other_args.dtype) == str(candle.f64) t_new_other_kwargs = t.to(other=other) assert str(t_new_other_kwargs.dtype) == str(candle.f64) @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_tensor_can_be_moved_via_to(): t = Tensor(42.0) assert t.device == "cpu" t_new_args = t.to("cuda") assert t_new_args.device == "cuda" t_new_kwargs = t.to(device="cuda") assert t_new_kwargs.device == "cuda" pytest.raises(TypeError, lambda: t.to("not a device")) pytest.raises(TypeError, lambda: t.to(device="not a device")) pytest.raises(TypeError, lambda: t.to("cuda", "not a device")) pytest.raises(TypeError, lambda: t.to()) pytest.raises(ValueError, lambda: t.to("cuda", device="cpu")) pytest.raises(ValueError, lambda: t.to("cuda", "cuda")) other = Tensor(42.0).to("cuda") t_new_other_args = t.to(other) assert t_new_other_args.device == "cuda" t_new_other_kwargs = t.to(other=other) assert t_new_other_kwargs.device == "cuda" @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_tensor_can_be_moved_and_cast_via_to(): t = Tensor(42.0) assert t.device == "cpu" assert str(t.dtype) == str(candle.f32) t_new_args = t.to("cuda", candle.f64) assert t_new_args.device == "cuda" assert str(t_new_args.dtype) == str(candle.f64) t_new_kwargs = t.to(device="cuda", dtype=candle.f64) assert t_new_kwargs.device == "cuda" assert str(t_new_kwargs.dtype) == str(candle.f64) other = Tensor(42.0).to("cuda").to(candle.f64) 
t_new_other_args = t.to(other) assert t_new_other_args.device == "cuda" assert str(t_new_other_args.dtype) == str(candle.f64) t_new_other_kwargs = t.to(other=other) assert t_new_other_kwargs.device == "cuda" assert str(t_new_other_kwargs.dtype) == str(candle.f64) def test_tensor_can_be_added(): t = Tensor(42.0) result = t + t assert result.values() == 84.0 result = t + 2.0 assert result.values() == 44.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_add(b) c = a + b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d + e def test_tensor_can_be_subtracted(): t = Tensor(42.0) result = t - t assert result.values() == 0 result = t - 2.0 assert result.values() == 40.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_sub(b) c = a - b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d - e def test_tensor_can_be_multiplied(): t = Tensor(42.0) result = t * t assert result.values() == 1764.0 result = t * 2.0 assert result.values() == 84.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_mul(b) c = a * b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d * e def test_tensor_can_be_divided(): t = Tensor(42.0) result = t / t assert result.values() == 1.0 result = t / 2.0 assert result.values() == 21.0 a = candle.rand((3, 1, 4)) b = candle.rand((2, 1)) c_native = a.broadcast_div(b) c = a / b assert c.shape == (3, 2, 4) assert c.values() == c_native.values() with pytest.raises(ValueError): d = candle.rand((3, 4, 5)) e = candle.rand((4, 6)) f = d / e
candle/candle-pyo3/tests/native/test_tensor.py/0
{ "file_path": "candle/candle-pyo3/tests/native/test_tensor.py", "repo_id": "candle", "token_count": 4688 }
36
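The broadcasting assertions in the tests above mirror candle's Rust-side broadcast rules; `a + b` on the Python side maps onto broadcast_add. A Rust sketch of the same shape combination (shapes copied from the test, data random):

use candle::{Device, Result, Tensor};

fn broadcast_demo() -> Result<()> {
    let a = Tensor::rand(0f32, 1., (3, 1, 4), &Device::Cpu)?;
    let b = Tensor::rand(0f32, 1., (2, 1), &Device::Cpu)?;
    let c = a.broadcast_add(&b)?; // (3, 1, 4) + (2, 1) -> (3, 2, 4)
    assert_eq!(c.dims(), &[3, 2, 4]);
    Ok(())
}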
use super::with_tracing::{linear_no_bias as linear, Linear}; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{embedding, Embedding, Module, VarBuilder}; use serde::Deserialize; use std::collections::HashMap; use std::sync::{Arc, Mutex}; pub const MAX_SEQ_LEN: usize = 4096; #[derive(Deserialize)] pub struct LlamaConfig { pub hidden_size: usize, pub intermediate_size: usize, pub vocab_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: Option<usize>, pub rms_norm_eps: f64, #[serde(default = "default_rope")] pub rope_theta: f32, } fn default_rope() -> f32 { 10_000.0 } impl LlamaConfig { pub fn into_config(self, use_flash_attn: bool) -> Config { Config { hidden_size: self.hidden_size, intermediate_size: self.intermediate_size, vocab_size: self.vocab_size, num_hidden_layers: self.num_hidden_layers, num_attention_heads: self.num_attention_heads, num_key_value_heads: self.num_key_value_heads.unwrap_or(self.num_attention_heads), rms_norm_eps: self.rms_norm_eps, rope_theta: self.rope_theta, use_flash_attn, } } } pub struct Config { pub hidden_size: usize, pub intermediate_size: usize, pub vocab_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub use_flash_attn: bool, pub rms_norm_eps: f64, pub rope_theta: f32, } impl Config { pub fn config_7b_v1(use_flash_attn: bool) -> Self { Self { hidden_size: 4096, intermediate_size: 11008, vocab_size: 32000, num_hidden_layers: 32, num_attention_heads: 32, num_key_value_heads: 32, use_flash_attn, rms_norm_eps: 1e-6, rope_theta: 10_000.0, } } pub fn config_7b_v2(use_flash_attn: bool) -> Self { Self { hidden_size: 4096, intermediate_size: 11008, vocab_size: 32000, num_hidden_layers: 32, num_attention_heads: 32, num_key_value_heads: 32, use_flash_attn, rms_norm_eps: 1e-5, rope_theta: 10_000.0, } } } #[derive(Clone)] pub struct Cache { masks: Arc<Mutex<HashMap<usize, Tensor>>>, pub use_kv_cache: bool, #[allow(clippy::type_complexity)] kvs: Arc<Mutex<Vec<Option<(Tensor, Tensor)>>>>, cos: Tensor, sin: Tensor, device: Device, } impl Cache { pub fn new(use_kv_cache: bool, dtype: DType, config: &Config, device: &Device) -> Result<Self> { // precompute freqs_cis let n_elem = config.hidden_size / config.num_attention_heads; let theta: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / config.rope_theta.powf(i as f32 / n_elem as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)? .to_dtype(DType::F32)? .reshape((MAX_SEQ_LEN, 1))? 
.matmul(&theta.reshape((1, theta.elem_count()))?)?; // This is different from the paper, see: // https://github.com/huggingface/transformers/blob/6112b1c6442aaf7affd2b0676a1cd4eee30c45cf/src/transformers/models/llama/modeling_llama.py#L112 let idx_theta = Tensor::cat(&[&idx_theta, &idx_theta], D::Minus1)?; let cos = idx_theta.cos()?.to_dtype(dtype)?; let sin = idx_theta.sin()?.to_dtype(dtype)?; Ok(Self { masks: Arc::new(Mutex::new(HashMap::new())), use_kv_cache, kvs: Arc::new(Mutex::new(vec![None; config.num_hidden_layers])), device: device.clone(), cos, sin, }) } fn mask(&self, t: usize) -> Result<Tensor> { let mut masks = self.masks.lock().unwrap(); if let Some(mask) = masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), &self.device)?; masks.insert(t, mask.clone()); Ok(mask) } } } struct RmsNorm { inner: candle_nn::RmsNorm, span: tracing::Span, } impl RmsNorm { fn load(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "rms-norm"); let inner = candle_nn::rms_norm(size, eps, vb)?; Ok(Self { inner, span }) } fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(x) } } struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_attention_heads: usize, num_key_value_heads: usize, head_dim: usize, cache: Cache, use_flash_attn: bool, span: tracing::Span, span_rot: tracing::Span, } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let _enter = self.span_rot.enter(); let (b_sz, _, seq_len, hidden_size) = x.dims4()?; let cos = self.cache.cos.narrow(0, index_pos, seq_len)?; let sin = self.cache.sin.narrow(0, index_pos, seq_len)?; let cos = cos.broadcast_as((b_sz, 1, seq_len, hidden_size))?; let sin = sin.broadcast_as((b_sz, 1, seq_len, hidden_size))?; let x1 = x.narrow(D::Minus1, 0, hidden_size / 2)?; let x2 = x.narrow(D::Minus1, hidden_size / 2, hidden_size / 2)?; let rotate_x = Tensor::cat(&[&x2.neg()?, &x1], D::Minus1)?; let rope = (x.broadcast_mul(&cos)? + rotate_x.broadcast_mul(&sin)?)?; Ok(rope) } fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, seq_len, hidden_size) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q .reshape((b_sz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)?; let k = k .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? .transpose(1, 2)?; let mut v = v .reshape((b_sz, seq_len, self.num_key_value_heads, self.head_dim))? 
.transpose(1, 2)?; let q = self.apply_rotary_emb(&q, index_pos)?; let mut k = self.apply_rotary_emb(&k, index_pos)?; if self.cache.use_kv_cache { let mut cache = self.cache.kvs.lock().unwrap(); if let Some((cache_k, cache_v)) = &cache[block_idx] { k = Tensor::cat(&[cache_k, &k], 2)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 2)?.contiguous()?; let k_seq_len = k.dims()[1]; if k_seq_len > MAX_SEQ_LEN { k = k .narrow(D::Minus1, k_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)? .contiguous()? } let v_seq_len = v.dims()[1]; if v_seq_len > 2 * MAX_SEQ_LEN { v = v .narrow(D::Minus1, v_seq_len - MAX_SEQ_LEN, MAX_SEQ_LEN)? .contiguous()? } } cache[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let y = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, softmax_scale, seq_len > 1)?.transpose(1, 2)? } else { let in_dtype = q.dtype(); let q = q.to_dtype(DType::F32)?; let k = k.to_dtype(DType::F32)?; let v = v.to_dtype(DType::F32)?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let mask = self.cache.mask(seq_len)?.broadcast_as(att.shape())?; let att = masked_fill(&att, &mask, f32::NEG_INFINITY)?; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)? }; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, hidden_size])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.num_attention_heads / self.num_key_value_heads; if n_rep == 1 { Ok(x) } else { let (b_sz, n_kv_head, seq_len, head_dim) = x.dims4()?; let x = x .unsqueeze(2)? .expand((b_sz, n_kv_head, n_rep, seq_len, head_dim))? .reshape((b_sz, n_kv_head * n_rep, seq_len, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "attn"); let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot"); let size_in = cfg.hidden_size; let size_q = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_attention_heads; let size_kv = (cfg.hidden_size / cfg.num_attention_heads) * cfg.num_key_value_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_attention_heads: cfg.num_attention_heads, num_key_value_heads: cfg.num_key_value_heads, head_dim: cfg.hidden_size / cfg.num_attention_heads, cache: cache.clone(), use_flash_attn: cfg.use_flash_attn, span, span_rot, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, span: tracing::Span, } impl Mlp { fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let x = (candle_nn::ops::silu(&self.c_fc1.forward(x)?)? 
* self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "mlp"); let h_size = cfg.hidden_size; let i_size = cfg.intermediate_size; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self { c_fc1, c_fc2, c_proj, span, }) } } struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, span: tracing::Span, } impl Block { fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> { let _enter = self.span.enter(); let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "block"); let attn = CausalSelfAttention::load(vb.pp("self_attn"), cache, cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let rms_1 = RmsNorm::load(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let rms_2 = RmsNorm::load( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { rms_1, attn, rms_2, mlp, span, }) } } pub struct Llama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, } impl Llama { pub fn forward(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx)?; } let x = self.ln_f.forward(&x)?; let x = x.i((.., seq_len - 1, ..))?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> { let wte = embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = RmsNorm::load(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.num_hidden_layers) .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), cache, cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, }) } }
candle/candle-transformers/src/models/llama.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama.rs", "repo_id": "candle", "token_count": 7702 }
37
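Standing up the model above starts from a Config and a Cache; the Cache precomputes the rotary cos/sin tables up to MAX_SEQ_LEN and holds the per-layer KV cache slots. A construction sketch (the module path candle_transformers::models::llama and the CPU device are assumptions; real use would continue with Llama::load over a VarBuilder for the checkpoint weights):

use candle::{DType, Device, Result};
use candle_transformers::models::llama::{Cache, Config};

fn llama_setup_demo() -> Result<()> {
    let cfg = Config::config_7b_v2(false); // 7B v2 hyper-parameters, flash-attn disabled
    let cache = Cache::new(true, DType::F32, &cfg, &Device::Cpu)?; // use_kv_cache = true
    assert!(cache.use_kv_cache);
    Ok(())
}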
use crate::quantized_nn::{linear_no_bias, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::Activation; use std::sync::Arc; pub use crate::models::mistral::Config; #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { fn new(cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(DType::F32)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { 
q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> { let n_rep = self.num_kv_groups; if n_rep == 1 { Ok(xs) } else { let (b_sz, num_kv_heads, seq_len, head_dim) = xs.dims4()?; xs.unsqueeze(2)? .expand((b_sz, num_kv_heads, n_rep, seq_len, head_dim))? .reshape((b_sz, num_kv_heads * n_rep, seq_len, head_dim)) } } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = self.repeat_kv(key_states)?; let value_states = self.repeat_kv(value_states)?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: usize, device: Device, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(DType::F32) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
candle/candle-transformers/src/models/quantized_mistral.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_mistral.rs", "repo_id": "candle", "token_count": 6082 }
38
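The repeat_kv helper above implements grouped-query attention by tiling each KV head num_kv_groups times. The same expansion as a standalone shape check (the head counts and dimensions are made up):

use candle::{DType, Device, Result, Tensor};

fn repeat_kv_demo() -> Result<()> {
    let (b, n_kv, n_rep, seq, hd) = (1, 2, 4, 5, 8);
    let xs = Tensor::zeros((b, n_kv, seq, hd), DType::F32, &Device::Cpu)?;
    let ys = xs
        .unsqueeze(2)?
        .expand((b, n_kv, n_rep, seq, hd))?
        .reshape((b, n_kv * n_rep, seq, hd))?;
    assert_eq!(ys.dims(), &[b, n_kv * n_rep, seq, hd]);
    Ok(())
}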
//! # Denoising Diffusion Implicit Models //! //! The Denoising Diffusion Implicit Models (DDIM) is a simple scheduler //! similar to Denoising Diffusion Probabilistic Models (DDPM). The DDPM //! generative process is the reverse of a Markovian process, DDIM generalizes //! this to non-Markovian guidance. //! //! Denoising Diffusion Implicit Models, J. Song et al, 2020. //! https://arxiv.org/abs/2010.02502 use super::schedulers::{ betas_for_alpha_bar, BetaSchedule, PredictionType, Scheduler, SchedulerConfig, TimestepSpacing, }; use candle::{Result, Tensor}; /// The configuration for the DDIM scheduler. #[derive(Debug, Clone, Copy)] pub struct DDIMSchedulerConfig { /// The value of beta at the beginning of training. pub beta_start: f64, /// The value of beta at the end of training. pub beta_end: f64, /// How beta evolved during training. pub beta_schedule: BetaSchedule, /// The amount of noise to be added at each step. pub eta: f64, /// Adjust the indexes of the inference schedule by this value. pub steps_offset: usize, /// prediction type of the scheduler function, one of `epsilon` (predicting /// the noise of the diffusion process), `sample` (directly predicting the noisy sample`) /// or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) pub prediction_type: PredictionType, /// number of diffusion steps used to train the model pub train_timesteps: usize, /// time step spacing for the diffusion process pub timestep_spacing: TimestepSpacing, } impl Default for DDIMSchedulerConfig { fn default() -> Self { Self { beta_start: 0.00085f64, beta_end: 0.012f64, beta_schedule: BetaSchedule::ScaledLinear, eta: 0., steps_offset: 1, prediction_type: PredictionType::Epsilon, train_timesteps: 1000, timestep_spacing: TimestepSpacing::Leading, } } } impl SchedulerConfig for DDIMSchedulerConfig { fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>> { Ok(Box::new(DDIMScheduler::new(inference_steps, *self)?)) } } /// The DDIM scheduler. #[derive(Debug, Clone)] pub struct DDIMScheduler { timesteps: Vec<usize>, alphas_cumprod: Vec<f64>, step_ratio: usize, init_noise_sigma: f64, pub config: DDIMSchedulerConfig, } // clip_sample: False, set_alpha_to_one: False impl DDIMScheduler { /// Creates a new DDIM scheduler given the number of steps to be /// used for inference as well as the number of steps that was used /// during training. fn new(inference_steps: usize, config: DDIMSchedulerConfig) -> Result<Self> { let step_ratio = config.train_timesteps / inference_steps; let timesteps: Vec<usize> = match config.timestep_spacing { TimestepSpacing::Leading => (0..(inference_steps)) .map(|s| s * step_ratio + config.steps_offset) .rev() .collect(), TimestepSpacing::Trailing => std::iter::successors(Some(config.train_timesteps), |n| { if *n > step_ratio { Some(n - step_ratio) } else { None } }) .map(|n| n - 1) .collect(), TimestepSpacing::Linspace => { super::utils::linspace(0.0, (config.train_timesteps - 1) as f64, inference_steps)? .to_vec1::<f64>()? .iter() .map(|&f| f as usize) .rev() .collect() } }; let betas = match config.beta_schedule { BetaSchedule::ScaledLinear => super::utils::linspace( config.beta_start.sqrt(), config.beta_end.sqrt(), config.train_timesteps, )? .sqr()?, BetaSchedule::Linear => { super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)? 
} BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?, }; let betas = betas.to_vec1::<f64>()?; let mut alphas_cumprod = Vec::with_capacity(betas.len()); for &beta in betas.iter() { let alpha = 1.0 - beta; alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64)) } Ok(Self { alphas_cumprod, timesteps, step_ratio, init_noise_sigma: 1., config, }) } } impl Scheduler for DDIMScheduler { /// Performs a backward step during inference. fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> { let timestep = if timestep >= self.alphas_cumprod.len() { timestep - 1 } else { timestep }; // https://github.com/huggingface/diffusers/blob/6e099e2c8ce4c4f5c7318e970a8c093dc5c7046e/src/diffusers/schedulers/scheduling_ddim.py#L195 let prev_timestep = if timestep > self.step_ratio { timestep - self.step_ratio } else { 0 }; let alpha_prod_t = self.alphas_cumprod[timestep]; let alpha_prod_t_prev = self.alphas_cumprod[prev_timestep]; let beta_prod_t = 1. - alpha_prod_t; let beta_prod_t_prev = 1. - alpha_prod_t_prev; let (pred_original_sample, pred_epsilon) = match self.config.prediction_type { PredictionType::Epsilon => { let pred_original_sample = ((sample - (model_output * beta_prod_t.sqrt())?)? * (1. / alpha_prod_t.sqrt()))?; (pred_original_sample, model_output.clone()) } PredictionType::VPrediction => { let pred_original_sample = ((sample * alpha_prod_t.sqrt())? - (model_output * beta_prod_t.sqrt())?)?; let pred_epsilon = ((model_output * alpha_prod_t.sqrt())? + (sample * beta_prod_t.sqrt())?)?; (pred_original_sample, pred_epsilon) } PredictionType::Sample => { let pred_original_sample = model_output.clone(); let pred_epsilon = ((sample - &pred_original_sample * alpha_prod_t.sqrt())? * (1. / beta_prod_t.sqrt()))?; (pred_original_sample, pred_epsilon) } }; let variance = (beta_prod_t_prev / beta_prod_t) * (1. - alpha_prod_t / alpha_prod_t_prev); let std_dev_t = self.config.eta * variance.sqrt(); let pred_sample_direction = (pred_epsilon * (1. - alpha_prod_t_prev - std_dev_t * std_dev_t).sqrt())?; let prev_sample = ((pred_original_sample * alpha_prod_t_prev.sqrt())? + pred_sample_direction)?; if self.config.eta > 0. { &prev_sample + Tensor::randn( 0f32, std_dev_t as f32, prev_sample.shape(), prev_sample.device(), )? } else { Ok(prev_sample) } } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor> { Ok(sample) } fn timesteps(&self) -> &[usize] { self.timesteps.as_slice() } fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor> { let timestep = if timestep >= self.alphas_cumprod.len() { timestep - 1 } else { timestep }; let sqrt_alpha_prod = self.alphas_cumprod[timestep].sqrt(); let sqrt_one_minus_alpha_prod = (1.0 - self.alphas_cumprod[timestep]).sqrt(); (original * sqrt_alpha_prod)? + (noise * sqrt_one_minus_alpha_prod)? } fn init_noise_sigma(&self) -> f64 { self.init_noise_sigma } }
candle/candle-transformers/src/models/stable_diffusion/ddim.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/ddim.rs", "repo_id": "candle", "token_count": 3953 }
39
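The scheduler above is built through its config's build method, which fixes the number of inference steps and derives the timestep grid and cumulative alphas. A construction sketch (the module paths under candle_transformers::models::stable_diffusion are assumed to be public, and 30 steps is an arbitrary choice):

use candle::Result;
use candle_transformers::models::stable_diffusion::ddim::DDIMSchedulerConfig;
use candle_transformers::models::stable_diffusion::schedulers::{Scheduler, SchedulerConfig};

fn ddim_demo() -> Result<()> {
    let cfg = DDIMSchedulerConfig::default(); // 1000 train timesteps, leading spacing, epsilon prediction
    let scheduler = cfg.build(30)?;
    assert_eq!(scheduler.timesteps().len(), 30);
    Ok(())
}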
// Audio processing code, adapted from whisper.cpp // https://github.com/ggerganov/whisper.cpp pub trait Float: num_traits::Float + num_traits::FloatConst + num_traits::NumAssign {} impl Float for f32 {} impl Float for f64 {} // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2357 fn fft<T: Float>(inp: &[T]) -> Vec<T> { let n = inp.len(); let zero = T::zero(); if n == 1 { return vec![inp[0], zero]; } if n % 2 == 1 { return dft(inp); } let mut out = vec![zero; n * 2]; let mut even = Vec::with_capacity(n / 2); let mut odd = Vec::with_capacity(n / 2); for (i, &inp) in inp.iter().enumerate() { if i % 2 == 0 { even.push(inp) } else { odd.push(inp); } } let even_fft = fft(&even); let odd_fft = fft(&odd); let two_pi = T::PI() + T::PI(); let n_t = T::from(n).unwrap(); for k in 0..n / 2 { let k_t = T::from(k).unwrap(); let theta = two_pi * k_t / n_t; let re = theta.cos(); let im = -theta.sin(); let re_odd = odd_fft[2 * k]; let im_odd = odd_fft[2 * k + 1]; out[2 * k] = even_fft[2 * k] + re * re_odd - im * im_odd; out[2 * k + 1] = even_fft[2 * k + 1] + re * im_odd + im * re_odd; out[2 * (k + n / 2)] = even_fft[2 * k] - re * re_odd + im * im_odd; out[2 * (k + n / 2) + 1] = even_fft[2 * k + 1] - re * im_odd - im * re_odd; } out } // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2337 fn dft<T: Float>(inp: &[T]) -> Vec<T> { let zero = T::zero(); let n = inp.len(); let two_pi = T::PI() + T::PI(); let mut out = Vec::with_capacity(2 * n); let n_t = T::from(n).unwrap(); for k in 0..n { let k_t = T::from(k).unwrap(); let mut re = zero; let mut im = zero; for (j, &inp) in inp.iter().enumerate() { let j_t = T::from(j).unwrap(); let angle = two_pi * k_t * j_t / n_t; re += inp * angle.cos(); im -= inp * angle.sin(); } out.push(re); out.push(im); } out } #[allow(clippy::too_many_arguments)] // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2414 fn log_mel_spectrogram_w<T: Float>( ith: usize, hann: &[T], samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, speed_up: bool, n_len: usize, n_mel: usize, n_threads: usize, ) -> Vec<T> { let n_fft = if speed_up { 1 + fft_size / 4 } else { 1 + fft_size / 2 }; let zero = T::zero(); let half = T::from(0.5).unwrap(); let mut fft_in = vec![zero; fft_size]; let mut mel = vec![zero; n_len * n_mel]; for i in (ith..n_len).step_by(n_threads) { let offset = i * fft_step; // apply Hanning window for j in 0..fft_size { fft_in[j] = if offset + j < samples.len() { hann[j] * samples[offset + j] } else { zero } } // FFT -> mag^2 let mut fft_out: Vec<T> = fft(&fft_in); for j in 0..fft_size { fft_out[j] = fft_out[2 * j] * fft_out[2 * j] + fft_out[2 * j + 1] * fft_out[2 * j + 1]; } for j in 1..fft_size / 2 { let v = fft_out[fft_size - j]; fft_out[j] += v; } if speed_up { // scale down in the frequency domain results in a speed up in the time domain for j in 0..n_fft { fft_out[j] = half * (fft_out[2 * j] + fft_out[2 * j + 1]); } } // mel spectrogram for j in 0..n_mel { let mut sum = zero; for k in 0..n_fft { sum += fft_out[k] * filters[j * n_fft + k]; } mel[j * n_len + i] = T::max(sum, T::from(1e-10).unwrap()).log10(); } } mel } fn log_mel_spectrogram_<T: Float + std::fmt::Display>( samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, n_mel: usize, speed_up: bool, ) -> Vec<T> { let zero = T::zero(); let two_pi = T::PI() + T::PI(); let half = T::from(0.5).unwrap(); let one = T::from(1.0).unwrap(); let four = 
T::from(4.0).unwrap(); let fft_size_t = T::from(fft_size).unwrap(); let hann: Vec<T> = (0..fft_size) .map(|i| half * (one - ((two_pi * T::from(i).unwrap()) / fft_size_t).cos())) .collect(); let n_len = samples.len() / fft_step; // pad audio with at least one extra chunk of zeros let pad = 100 * super::CHUNK_LENGTH / 2; let n_len = if n_len % pad != 0 { (n_len / pad + 1) * pad } else { n_len }; let n_len = n_len + pad; let samples = { let mut samples_padded = samples.to_vec(); let to_add = n_len * fft_step - samples.len(); samples_padded.extend(std::iter::repeat(zero).take(to_add)); samples_padded }; // Use a single thread for now. let mut mel = log_mel_spectrogram_w( 0, &hann, &samples, filters, fft_size, fft_step, speed_up, n_len, n_mel, 1, ); let mmax = mel .iter() .max_by(|&u, &v| u.partial_cmp(v).unwrap_or(std::cmp::Ordering::Greater)) .copied() .unwrap_or(zero) - T::from(8).unwrap(); for m in mel.iter_mut() { let v = T::max(*m, mmax); *m = v / four + one } mel } pub fn pcm_to_mel<T: Float + std::fmt::Display>( cfg: &super::Config, samples: &[T], filters: &[T], ) -> Vec<T> { log_mel_spectrogram_( samples, filters, super::N_FFT, super::HOP_LENGTH, cfg.num_mel_bins, false, ) }
candle/candle-transformers/src/models/whisper/audio.rs/0
{ "file_path": "candle/candle-transformers/src/models/whisper/audio.rs", "repo_id": "candle", "token_count": 3131 }
40
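fft and dft above return interleaved complex output: frequency bin k lives at (out[2*k], out[2*k+1]) = (re, im). A standalone f32 mirror of the private dft plus a small sanity check makes that layout concrete (it duplicates the generic helper rather than calling it, since the helper is not public):

fn naive_dft(inp: &[f32]) -> Vec<f32> {
    let n = inp.len() as f32;
    let mut out = Vec::with_capacity(2 * inp.len());
    for k in 0..inp.len() {
        let (mut re, mut im) = (0f32, 0f32);
        for (j, &x) in inp.iter().enumerate() {
            let angle = 2. * std::f32::consts::PI * (k * j) as f32 / n;
            re += x * angle.cos();
            im -= x * angle.sin();
        }
        out.push(re);
        out.push(im);
    }
    out
}

fn dft_layout_check() {
    // A constant 4-sample signal puts all of its energy in bin 0: (4, 0); bins 1..=3 are ~0.
    let out = naive_dft(&[1f32, 1., 1., 1.]);
    assert!((out[0] - 4.).abs() < 1e-4);
    assert!(out[1].abs() < 1e-4 && out[2].abs() < 1e-4 && out[3].abs() < 1e-4);
}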
use crate::models::with_tracing::QMatMul; use crate::quantized_var_builder::VarBuilder; use candle::{Module, Result, Tensor}; #[derive(Debug, Clone)] pub struct Embedding { inner: candle_nn::Embedding, span: tracing::Span, } impl Embedding { pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> { let embeddings = vb.get((d1, d2), "weight")?.dequantize(vb.device())?; let inner = candle_nn::Embedding::new(embeddings, d2); let span = tracing::span!(tracing::Level::TRACE, "embedding"); Ok(Self { inner, span }) } pub fn embeddings(&self) -> &Tensor { self.inner.embeddings() } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } #[derive(Debug, Clone)] pub struct Linear { weight: QMatMul, bias: Option<Tensor>, } impl Linear { pub fn from_weights(weight: QMatMul, bias: Option<Tensor>) -> Self { Self { weight, bias } } } impl Module for Linear { fn forward(&self, x: &Tensor) -> candle::Result<Tensor> { let x = x.apply(&self.weight)?; match &self.bias { None => Ok(x), Some(bias) => x.broadcast_add(bias), } } } pub fn linear(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> { let bias = vb.get(out_dim, "bias")?.dequantize(vb.device())?; let weight = QMatMul::new(in_dim, out_dim, vb)?; Ok(Linear { weight, bias: Some(bias), }) } pub fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> { let weight = vb.get(size, "weight")?.dequantize(vb.device())?; let bias = vb.get(size, "bias")?.dequantize(vb.device())?; Ok(candle_nn::LayerNorm::new(weight, bias, eps)) } pub fn layer_norm_no_bias(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> { let weight = vb.get(size, "weight")?.dequantize(vb.device())?; Ok(candle_nn::LayerNorm::new_no_bias(weight, eps)) } pub fn linear_no_bias(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> { let weight = QMatMul::new(in_dim, out_dim, vb)?; Ok(Linear { weight, bias: None }) } #[derive(Debug, Clone)] pub struct RmsNorm { inner: candle_nn::RmsNorm, span: tracing::Span, } impl RmsNorm { pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "rms-norm"); let weight = vb.get(size, "weight")?.dequantize(vb.device())?; let inner = candle_nn::RmsNorm::new(weight, eps); Ok(Self { inner, span }) } } impl Module for RmsNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(x) } }
candle/candle-transformers/src/quantized_nn.rs/0
{ "file_path": "candle/candle-transformers/src/quantized_nn.rs", "repo_id": "candle", "token_count": 1282 }
41
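These quantized wrappers keep the weight in its quantized form (via QMatMul) and dequantize only the small tensors such as biases and norm weights. A thin usage sketch; the 768/3072 dimensions and the "ffn_up" prefix are hypothetical, and the VarBuilder would come from a real gguf checkpoint:

use candle::Result;
use candle_transformers::quantized_nn::{linear, Linear};
use candle_transformers::quantized_var_builder::VarBuilder;

fn load_quantized_linear(vb: VarBuilder) -> Result<Linear> {
    // Expects `ffn_up.weight` (quantized) and `ffn_up.bias` in the checkpoint.
    linear(768, 3072, vb.pp("ffn_up"))
}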
<!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <title>Candle Blip Image Captioning Demo</title> <script src="https://cdn.tailwindcss.com"></script> <script type="module" src="./code.js"></script> <script type="module"> const MODELS = { blip_image_quantized_q4k: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q4k.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "271 MB", }, blip_image_quantized_q80: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q80.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "505 MB", }, blip_image_large: { base_url: "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/refs%2Fpr%2F18/", model: "model.safetensors", config: "config.json", tokenizer: "tokenizer.json", quantized: false, size: "1.88 GB", }, }; const blipWorker = new Worker("./blipWorker.js", { type: "module", }); const outputStatusEl = document.querySelector("#output-status"); const outputCaptionEl = document.querySelector("#output-caption"); const modelSelectEl = document.querySelector("#model"); const clearBtn = document.querySelector("#clear-btn"); const fileUpload = document.querySelector("#file-upload"); const dropArea = document.querySelector("#drop-area"); const imagesExamples = document.querySelector("#image-select"); const canvas = document.querySelector("#canvas"); const ctxCanvas = canvas.getContext("2d"); let isCaptioning = false; let currentImageURL = null; clearBtn.addEventListener("click", () => { clearImageCanvas(); }); modelSelectEl.addEventListener("change", () => { if (currentImageURL) { runInference(currentImageURL); } }); //add event listener to file input fileUpload.addEventListener("input", async (e) => { const target = e.target; if (target.files.length > 0) { const href = URL.createObjectURL(target.files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); // add event listener to drop-area dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); }); dropArea.addEventListener("drop", async (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } else if (url) { clearImageCanvas(); await drawImageCanvas(url); runInference(url); } }); imagesExamples.addEventListener("click", async (e) => { if (isCaptioning) { return; } const target = e.target; if (target.nodeName === "IMG") { const href = target.src; clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); function clearImageCanvas() { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); isCaptioning = false; clearBtn.disabled = true; canvas.parentElement.style.height = "auto"; outputStatusEl.hidden = 
false; outputCaptionEl.hidden = true; outputStatusEl.innerText = "Please select an image"; currentImageURL = null; } async function drawImageCanvas(imgURL) { if (!imgURL) { throw new Error("No image URL provided"); } return new Promise((resolve, reject) => { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { canvas.width = img.width; canvas.height = img.height; ctxCanvas.drawImage(img, 0, 0); canvas.parentElement.style.height = canvas.offsetHeight + "px"; clearBtn.disabled = false; resolve(img); }; img.src = imgURL; currentImageURL = imgURL; }); } document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelectEl.appendChild(option); } }); async function getImageCaption( worker, weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, updateStatus = null ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } function updateStatus(data) { if (data.status === "status") { outputStatusEl.innerText = data.message; } } async function runInference(imageURL) { if (isCaptioning || !imageURL) { alert("Please select an image first"); return; } outputStatusEl.hidden = false; outputCaptionEl.hidden = true; clearBtn.disabled = true; modelSelectEl.disabled = true; isCaptioning = true; const selectedModel = modelSelectEl.value; const model = MODELS[selectedModel]; const weightsURL = `${model.base_url}${model.model}`; const tokenizerURL = `${model.base_url}${model.tokenizer}`; const configURL = `${model.base_url}${model.config}`; const quantized = model.quantized; try { const time = performance.now(); const caption = await getImageCaption( blipWorker, weightsURL, tokenizerURL, configURL, selectedModel, imageURL, quantized, updateStatus ); outputStatusEl.hidden = true; outputCaptionEl.hidden = false; const totalTime = ((performance.now() - time)/1000).toFixed(2); outputCaptionEl.innerHTML = `${ caption.output }<br/><span class="text-xs">Inference time: ${totalTime} s</span>`; } catch (err) { console.error(err); outputStatusEl.hidden = false; outputCaptionEl.hidden = true; outputStatusEl.innerText = err.message; } clearBtn.disabled = false; modelSelectEl.disabled = false; isCaptioning = false; } </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-5 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle BLIP Image Captioning</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> <a href="https://huggingface.co/Salesforce/blip-image-captioning-large" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >BLIP Image Captioning </a> running in the browser using <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle</a >, a minimalist ML 
framework for Rust. </p> <p class="text-xs max-w-lg py-2"> <b>Note:</b> The image captioning on the smallest model takes about 50 seconds and will vary depending on your machine and model size. </p> </div> <div> <label for="model" class="font-medium block">Model Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max" ></select> </div> <!-- drag and drop area --> <div class="grid gap-4 sm:grid-cols-2 py-4"> <div class="relative max-w-lg"> <div class="absolute w-full bottom-full flex justify-between items-center" > <div class="flex gap-2 w-full"> <button id="clear-btn" disabled title="Clear Image" class="ml-auto text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center" > <svg class="" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 13 12" height="1em" > <path d="M1.6.7 12 11.1M12 .7 1.6 11.1" stroke="#2E3036" stroke-width="2" /> </svg> </button> </div> </div> <div id="drop-area" class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden" > <div class="flex flex-col items-center justify-center space-y-1 text-center" > <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg" > <path d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z" fill="#000" /> </svg> <div class="flex text-sm text-gray-600"> <label for="file-upload" class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700" > <span>Drag and drop your image here</span> <span class="block text-xs">or</span> <span class="block text-xs">Click to upload</span> </label> </div> <input id="file-upload" name="file-upload" type="file" class="sr-only" /> </div> <canvas id="canvas" class="absolute pointer-events-none w-full" ></canvas> </div> </div> <div class=""> <div class="h-full bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2" > <p id="output-caption" class="m-auto text-xl text-center p-2" hidden ></p> <span id="output-status" class="m-auto font-light"> Please select an image </span> </div> </div> </div> <div> <div class="flex gap-3 items-center overflow-x-scroll" id="image-select" > <h3 class="font-medium">Examples:</h3> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg" class="cursor-pointer w-24 h-24 object-cover" /> </div> </div> </main> </body> </html>
candle/candle-wasm-examples/blip/index.html/0
{ "file_path": "candle/candle-wasm-examples/blip/index.html", "repo_id": "candle", "token_count": 7164 }
42
use crate::model::{Cache, Config, Llama}; use byteorder::{LittleEndian, ReadBytesExt}; use candle::{DType, Device, IndexOp, Result, Shape, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use serde::{Deserialize, Serialize}; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transferred via the following structure. #[derive(Serialize, Deserialize)] pub struct ModelData { pub tokenizer: Vec<u8>, pub model: Vec<u8>, } fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> { let mut buf = [0u8; 4]; r.read_exact(&mut buf)?; Ok(i32::from_le_bytes(buf)) } fn read_tensor<R: std::io::Read, S: Into<Shape>>( r: &mut R, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut data_t = vec![0f32; shape.elem_count()]; r.read_f32_into::<LittleEndian>(&mut data_t)?; let tensor = Tensor::from_vec(data_t, shape, dev)?; Ok(tensor) } pub struct Model { pub cache: Cache, pub config: Config, pub llama: Llama, pub tokenizer: Tokenizer, } impl Model { fn run( &self, link: &WorkerLink<Worker>, id: HandlerId, temp: f64, top_p: f64, prompt: String, ) -> Result<()> { let dev = Device::Cpu; let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1.0 { None } else { Some(top_p) }; console_log!("temp: {temp:?} top_p: {top_p:?} prompt: {prompt}"); let mut logits_processor = LogitsProcessor::new(299792458, temp, top_p); let mut index_pos = 0; let mut tokens = self .tokenizer .encode(prompt.to_string(), true) .map_err(|m| candle::Error::Msg(m.to_string()))? .get_ids() .to_vec(); link.respond(id, Ok(WorkerOutput::Generated(prompt))); for index in 0.. { if tokens.len() >= self.config.seq_len { break; } let context_size = if self.cache.use_kv_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &dev)?.unsqueeze(0)?; let logits = self.llama.forward(&input, index_pos)?; let logits = logits.squeeze(0)?; index_pos += ctxt.len(); let next_token = logits_processor.sample(&logits)?; tokens.push(next_token); if let Some(text) = self.tokenizer.id_to_token(next_token) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); link.respond(id, Ok(WorkerOutput::Generated(text))); } } Ok(()) } } impl Config { fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> { let dim = read_i32(r)? as usize; let hidden_dim = read_i32(r)? as usize; let n_layers = read_i32(r)? as usize; let n_heads = read_i32(r)? as usize; let n_kv_heads = read_i32(r)? as usize; let vocab_size = read_i32(r)? as usize; let seq_len = read_i32(r)? 
as usize; Ok(Self { dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len, norm_eps: 1e-5, }) } pub fn head_size(&self) -> usize { self.dim / self.n_heads } } struct TransformerWeights { // token embedding table token_embedding_table: Tensor, // (vocab_size, dim) // weights for rmsnorms rms_att_weight: Tensor, // (layer, dim) rmsnorm weights rms_ffn_weight: Tensor, // (layer, dim) // weights for matmuls wq: Tensor, // (layer, dim, dim) wk: Tensor, // (layer, dim, dim) wv: Tensor, // (layer, dim, dim) wo: Tensor, // (layer, dim, dim) // weights for ffn w1: Tensor, // (layer, hidden_dim, dim) w2: Tensor, // (layer, dim, hidden_dim) w3: Tensor, // (layer, hidden_dim, dim) // final rmsnorm rms_final_weight: Tensor, // (dim,) // freq_cis for RoPE relatively positional embeddings freq_cis_real: Tensor, // (seq_len, head_size/2) freq_cis_imag: Tensor, // (seq_len, head_size/2) } impl TransformerWeights { fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> { let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?; let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?; let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let rms_final_weight = read_tensor(r, c.dim, dev)?; let head_size = c.head_size(); let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?; let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?; Ok(Self { token_embedding_table, rms_att_weight, wq, wk, wv, wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, freq_cis_real, freq_cis_imag, }) } fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder> { let mut ws = std::collections::HashMap::new(); let mut insert = |name: &str, t: Tensor| { ws.insert(name.to_string(), t); }; insert("rot.freq_cis_real", self.freq_cis_real.clone()); insert("rot.freq_cis_imag", self.freq_cis_imag.clone()); insert( "model.embed_tokens.weight", self.token_embedding_table.clone(), ); insert("lm_head.weight", self.token_embedding_table.clone()); insert("model.norm.weight", self.rms_final_weight.clone()); for layer in 0..cfg.n_layers { ws.insert( format!("model.layers.{layer}.self_attn.q_proj.weight"), self.wq.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.k_proj.weight"), self.wk.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.v_proj.weight"), self.wv.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.o_proj.weight"), self.wo.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.gate_proj.weight"), self.w1.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.down_proj.weight"), self.w2.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.up_proj.weight"), self.w3.i(layer)?, ); ws.insert( format!("model.layers.{layer}.input_layernorm.weight"), self.rms_att_weight.i(layer)?, ); ws.insert( format!("model.layers.{layer}.post_attention_layernorm.weight"), self.rms_ffn_weight.i(layer)?, ); } let vb = VarBuilder::from_tensors(ws, DType::F32, device); Ok(vb) } } impl Model { pub fn load(md: ModelData) -> Result<Self> { let dev = Device::Cpu; let mut 
model = std::io::Cursor::new(md.model); let config = Config::from_reader(&mut model)?; let weights = TransformerWeights::from_reader(&mut model, &config, &dev)?; let vb = weights.var_builder(&config, &dev)?; let cache = Cache::new(true, &config, vb.pp("rot"))?; let llama = Llama::load(vb, &cache, &config)?; let tokenizer = Tokenizer::from_bytes(&md.tokenizer).map_err(|m| candle::Error::Msg(m.to_string()))?; Ok(Self { cache, config, llama, tokenizer, }) } } pub struct Worker { link: WorkerLink<Self>, model: Option<Model>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), Run(f64, f64, String), } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { Generated(String), GenerationDone(std::result::Result<(), String>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = std::result::Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, model: None } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Model::load(md) { Ok(model) => { self.model = Some(model); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::Run(temp, top_p, prompt) => match &mut self.model { None => Err("model has not been set yet".to_string()), Some(model) => { { let mut cache = model.cache.kvs.lock().unwrap(); for elem in cache.iter_mut() { *elem = None } } let result = model .run(&self.link, id, temp, top_p, prompt) .map_err(|e| e.to_string()); Ok(WorkerOutput::GenerationDone(result)) } }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
candle/candle-wasm-examples/llama2-c/src/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/worker.rs", "repo_id": "candle", "token_count": 5770 }
43
## Running T5 with Candle and WASM Here, we provide an example of how to run T5 using a Candle-compiled WASM binary and runtime. ### Vanilla JS and WebWorkers To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library: ```bash sh build-lib.sh ``` This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module: ```js import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m.js"; ``` For the quantized version, we need to import the quantized module: ```js import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m-quantized.js"; ``` The full example can be found under `./index.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example: ```bash python -m http.server ``` Then open `http://localhost:8000/index.html` in your browser.
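As a rough sketch of the WebWorker side (the exact constructor arguments and generation calls exposed by the bindings are assumptions here, not necessarily this example's actual API):

```js
// worker sketch: initialize the WASM module, then hand off to the model bindings
import init, { ModelEncoder } from "./build/m.js";

self.addEventListener("message", async (event) => {
  // one-time WASM initialization before any binding is used
  await init();
  // a real worker would construct ModelEncoder / ModelConditionalGeneration here
  // from the fetched weights, tokenizer and config bytes, then run them on event.data
  self.postMessage({ status: "ready" });
});
```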
candle/candle-wasm-examples/t5/README.md/0
{ "file_path": "candle/candle-wasm-examples/t5/README.md", "repo_id": "candle", "token_count": 282 }
44
use yew_agent::PublicWorker; fn main() { console_error_panic_hook::set_once(); candle_wasm_example_yolo::Worker::register(); }
candle/candle-wasm-examples/yolo/src/bin/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/bin/worker.rs", "repo_id": "candle", "token_count": 53 }
45
.DS_Store node_modules /build /.svelte-kit /package .env .env.* !.env.example # Ignore files for PNPM, NPM and YARN pnpm-lock.yaml package-lock.json yarn.lock
chat-ui/.eslintignore/0
{ "file_path": "chat-ui/.eslintignore", "repo_id": "chat-ui", "token_count": 69 }
46
engine-strict=true
chat-ui/.npmrc/0
{ "file_path": "chat-ui/.npmrc", "repo_id": "chat-ui", "token_count": 7 }
47
declare module "*.ttf" { const value: ArrayBuffer; export default value; }
chat-ui/src/ambient.d.ts/0
{ "file_path": "chat-ui/src/ambient.d.ts", "repo_id": "chat-ui", "token_count": 26 }
48
<script lang="ts"> import CarbonEarth from "~icons/carbon/earth"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import type { Model } from "$lib/types/Model"; export let model: Pick<Model, "name" | "datasetName" | "websiteUrl" | "modelUrl" | "datasetUrl">; export let variant: "light" | "dark" = "light"; </script> <div class="flex items-center gap-5 rounded-xl bg-gray-100 px-3 py-2 text-xs sm:text-sm {variant === 'dark' ? 'text-gray-600 dark:bg-gray-800 dark:text-gray-300' : 'text-gray-800 dark:bg-gray-100 dark:text-gray-600'}" > <a href={model.modelUrl || "https://huggingface.co/" + model.name} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Model <div class="max-sm:hidden">&nbsp;page</div></a > {#if model.datasetName || model.datasetUrl} <a href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Dataset <div class="max-sm:hidden">&nbsp;page</div></a > {/if} {#if model.websiteUrl} <a href={model.websiteUrl} target="_blank" class="ml-auto flex items-center hover:underline" rel="noreferrer" > <CarbonEarth class="mr-1.5 shrink-0 text-xs text-gray-400" /> Website </a> {/if} </div>
chat-ui/src/lib/components/ModelCardMetadata.svelte/0
{ "file_path": "chat-ui/src/lib/components/ModelCardMetadata.svelte", "repo_id": "chat-ui", "token_count": 623 }
49
<script lang="ts"> import { PUBLIC_APP_NAME, PUBLIC_VERSION } from "$env/static/public"; import { PUBLIC_ANNOUNCEMENT_BANNERS } from "$env/static/public"; import { PUBLIC_APP_DESCRIPTION } from "$env/static/public"; import Logo from "$lib/components/icons/Logo.svelte"; import { createEventDispatcher } from "svelte"; import IconGear from "~icons/bi/gear-fill"; import AnnouncementBanner from "../AnnouncementBanner.svelte"; import type { Model } from "$lib/types/Model"; import ModelCardMetadata from "../ModelCardMetadata.svelte"; import { findCurrentModel } from "$lib/utils/models"; import { base } from "$app/paths"; import { useSettingsStore } from "$lib/stores/settings"; import JSON5 from "json5"; export let currentModel: Model; export let models: Model[]; const settings = useSettingsStore(); $: currentModelMetadata = findCurrentModel(models, $settings.activeModel); const announcementBanners = PUBLIC_ANNOUNCEMENT_BANNERS ? JSON5.parse(PUBLIC_ANNOUNCEMENT_BANNERS) : []; const dispatch = createEventDispatcher<{ message: string }>(); </script> <div class="my-auto grid gap-8 lg:grid-cols-3"> <div class="lg:col-span-1"> <div> <div class="mb-3 flex items-center text-2xl font-semibold"> <Logo classNames="mr-1 flex-none" /> {PUBLIC_APP_NAME} <div class="ml-3 flex h-6 items-center rounded-lg border border-gray-100 bg-gray-50 px-2 text-base text-gray-400 dark:border-gray-700/60 dark:bg-gray-800" > v{PUBLIC_VERSION} </div> </div> <p class="text-base text-gray-600 dark:text-gray-400"> {PUBLIC_APP_DESCRIPTION || "Making the community's best AI chat models available to everyone."} </p> </div> </div> <div class="lg:col-span-2 lg:pl-24"> {#each announcementBanners as banner} <AnnouncementBanner classNames="mb-4" title={banner.title}> <a target="_blank" href={banner.linkHref} class="mr-2 flex items-center underline hover:no-underline">{banner.linkTitle}</a > </AnnouncementBanner> {/each} <div class="overflow-hidden rounded-xl border dark:border-gray-800"> <div class="flex p-3"> <div> <div class="text-sm text-gray-600 dark:text-gray-400">Current Model</div> <div class="font-semibold">{currentModel.displayName}</div> </div> <a href="{base}/settings/{currentModel.id}" class="btn ml-auto flex h-7 w-7 self-start rounded-full bg-gray-100 p-1 text-xs hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-600" ><IconGear /></a > </div> <ModelCardMetadata variant="dark" model={currentModel} /> </div> </div> {#if currentModelMetadata.promptExamples} <div class="lg:col-span-3 lg:mt-6"> <p class="mb-3 text-gray-600 dark:text-gray-300">Examples</p> <div class="grid gap-3 lg:grid-cols-3 lg:gap-5"> {#each currentModelMetadata.promptExamples as example} <button type="button" class="rounded-xl border bg-gray-50 p-2.5 text-gray-600 hover:bg-gray-100 sm:p-4 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-300 dark:hover:bg-gray-700" on:click={() => dispatch("message", example.prompt)} > {example.title} </button> {/each} </div> </div>{/if} </div>
chat-ui/src/lib/components/chat/ChatIntroduction.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatIntroduction.svelte", "repo_id": "chat-ui", "token_count": 1318 }
50
import { MONGODB_URL, MONGODB_DB_NAME, MONGODB_DIRECT_CONNECTION } from "$env/static/private"; import { GridFSBucket, MongoClient } from "mongodb"; import type { Conversation } from "$lib/types/Conversation"; import type { SharedConversation } from "$lib/types/SharedConversation"; import type { AbortedGeneration } from "$lib/types/AbortedGeneration"; import type { Settings } from "$lib/types/Settings"; import type { User } from "$lib/types/User"; import type { MessageEvent } from "$lib/types/MessageEvent"; import type { Session } from "$lib/types/Session"; import type { Assistant } from "$lib/types/Assistant"; import type { Report } from "$lib/types/Report"; if (!MONGODB_URL) { throw new Error( "Please specify the MONGODB_URL environment variable inside .env.local. Set it to mongodb://localhost:27017 if you are running MongoDB locally, or to a MongoDB Atlas free instance for example." ); } const client = new MongoClient(MONGODB_URL, { directConnection: MONGODB_DIRECT_CONNECTION === "true", }); export const connectPromise = client.connect().catch(console.error); const db = client.db(MONGODB_DB_NAME + (import.meta.env.MODE === "test" ? "-test" : "")); const conversations = db.collection<Conversation>("conversations"); const assistants = db.collection<Assistant>("assistants"); const reports = db.collection<Report>("reports"); const sharedConversations = db.collection<SharedConversation>("sharedConversations"); const abortedGenerations = db.collection<AbortedGeneration>("abortedGenerations"); const settings = db.collection<Settings>("settings"); const users = db.collection<User>("users"); const sessions = db.collection<Session>("sessions"); const messageEvents = db.collection<MessageEvent>("messageEvents"); const bucket = new GridFSBucket(db, { bucketName: "files" }); export { client, db }; export const collections = { conversations, assistants, reports, sharedConversations, abortedGenerations, settings, users, sessions, messageEvents, bucket, }; client.on("open", () => { conversations .createIndex( { sessionId: 1, updatedAt: -1 }, { partialFilterExpression: { sessionId: { $exists: true } } } ) .catch(console.error); conversations .createIndex( { userId: 1, updatedAt: -1 }, { partialFilterExpression: { userId: { $exists: true } } } ) .catch(console.error); abortedGenerations.createIndex({ updatedAt: 1 }, { expireAfterSeconds: 30 }).catch(console.error); abortedGenerations.createIndex({ conversationId: 1 }, { unique: true }).catch(console.error); sharedConversations.createIndex({ hash: 1 }, { unique: true }).catch(console.error); settings.createIndex({ sessionId: 1 }, { unique: true, sparse: true }).catch(console.error); settings.createIndex({ userId: 1 }, { unique: true, sparse: true }).catch(console.error); users.createIndex({ hfUserId: 1 }, { unique: true }).catch(console.error); users.createIndex({ sessionId: 1 }, { unique: true, sparse: true }).catch(console.error); messageEvents.createIndex({ createdAt: 1 }, { expireAfterSeconds: 60 }).catch(console.error); sessions.createIndex({ expiresAt: 1 }, { expireAfterSeconds: 0 }).catch(console.error); sessions.createIndex({ sessionId: 1 }, { unique: true }).catch(console.error); assistants.createIndex({ createdBy: 1 }).catch(console.error); assistants.createIndex({ userCount: 1 }).catch(console.error); assistants.createIndex({ featured: 1 }).catch(console.error); reports.createIndex({ assistantId: 1 }).catch(console.error); });
chat-ui/src/lib/server/database.ts/0
{ "file_path": "chat-ui/src/lib/server/database.ts", "repo_id": "chat-ui", "token_count": 1102 }
51
import { HF_TOKEN, HF_API_ROOT, MODELS, OLD_MODELS, TASK_MODEL, HF_ACCESS_TOKEN, } from "$env/static/private"; import type { ChatTemplateInput } from "$lib/types/Template"; import { compileTemplate } from "$lib/utils/template"; import { z } from "zod"; import endpoints, { endpointSchema, type Endpoint } from "./endpoints/endpoints"; import endpointTgi from "./endpoints/tgi/endpointTgi"; import { sum } from "$lib/utils/sum"; import { embeddingModels, validateEmbeddingModelByName } from "./embeddingModels"; import JSON5 from "json5"; type Optional<T, K extends keyof T> = Pick<Partial<T>, K> & Omit<T, K>; const modelConfig = z.object({ /** Used as an identifier in DB */ id: z.string().optional(), /** Used to link to the model page, and for inference */ name: z.string().min(1), displayName: z.string().min(1).optional(), description: z.string().min(1).optional(), websiteUrl: z.string().url().optional(), modelUrl: z.string().url().optional(), datasetName: z.string().min(1).optional(), datasetUrl: z.string().url().optional(), userMessageToken: z.string().default(""), userMessageEndToken: z.string().default(""), assistantMessageToken: z.string().default(""), assistantMessageEndToken: z.string().default(""), messageEndToken: z.string().default(""), preprompt: z.string().default(""), prepromptUrl: z.string().url().optional(), chatPromptTemplate: z .string() .default( "{{preprompt}}" + "{{#each messages}}" + "{{#ifUser}}{{@root.userMessageToken}}{{content}}{{@root.userMessageEndToken}}{{/ifUser}}" + "{{#ifAssistant}}{{@root.assistantMessageToken}}{{content}}{{@root.assistantMessageEndToken}}{{/ifAssistant}}" + "{{/each}}" + "{{assistantMessageToken}}" ), promptExamples: z .array( z.object({ title: z.string().min(1), prompt: z.string().min(1), }) ) .optional(), endpoints: z.array(endpointSchema).optional(), parameters: z .object({ temperature: z.number().min(0).max(1), truncate: z.number().int().positive().optional(), max_new_tokens: z.number().int().positive(), stop: z.array(z.string()).optional(), top_p: z.number().positive().optional(), top_k: z.number().positive().optional(), repetition_penalty: z.number().min(-2).max(2).optional(), }) .passthrough() .optional(), multimodal: z.boolean().default(false), unlisted: z.boolean().default(false), embeddingModel: validateEmbeddingModelByName(embeddingModels).optional(), }); const modelsRaw = z.array(modelConfig).parse(JSON5.parse(MODELS)); const processModel = async (m: z.infer<typeof modelConfig>) => ({ ...m, userMessageEndToken: m?.userMessageEndToken || m?.messageEndToken, assistantMessageEndToken: m?.assistantMessageEndToken || m?.messageEndToken, chatPromptRender: compileTemplate<ChatTemplateInput>(m.chatPromptTemplate, m), id: m.id || m.name, displayName: m.displayName || m.name, preprompt: m.prepromptUrl ? await fetch(m.prepromptUrl).then((r) => r.text()) : m.preprompt, parameters: { ...m.parameters, stop_sequences: m.parameters?.stop }, }); const addEndpoint = (m: Awaited<ReturnType<typeof processModel>>) => ({ ...m, getEndpoint: async (): Promise<Endpoint> => { if (!m.endpoints) { return endpointTgi({ type: "tgi", url: `${HF_API_ROOT}/${m.name}`, accessToken: HF_TOKEN ?? 
HF_ACCESS_TOKEN, weight: 1, model: m, }); } const totalWeight = sum(m.endpoints.map((e) => e.weight)); let random = Math.random() * totalWeight; for (const endpoint of m.endpoints) { if (random < endpoint.weight) { const args = { ...endpoint, model: m }; switch (args.type) { case "tgi": return endpoints.tgi(args); case "aws": return await endpoints.aws(args); case "openai": return await endpoints.openai(args); case "llamacpp": return endpoints.llamacpp(args); case "ollama": return endpoints.ollama(args); default: // for legacy reason return endpoints.tgi(args); } } random -= endpoint.weight; } throw new Error(`Failed to select endpoint`); }, }); export const models = await Promise.all(modelsRaw.map((e) => processModel(e).then(addEndpoint))); export const defaultModel = models[0]; // Models that have been deprecated export const oldModels = OLD_MODELS ? z .array( z.object({ id: z.string().optional(), name: z.string().min(1), displayName: z.string().min(1).optional(), }) ) .parse(JSON5.parse(OLD_MODELS)) .map((m) => ({ ...m, id: m.id || m.name, displayName: m.displayName || m.name })) : []; export const validateModel = (_models: BackendModel[]) => { // Zod enum function requires 2 parameters return z.enum([_models[0].id, ..._models.slice(1).map((m) => m.id)]); }; // if `TASK_MODEL` is string & name of a model in `MODELS`, then we use `MODELS[TASK_MODEL]`, else we try to parse `TASK_MODEL` as a model config itself export const smallModel = TASK_MODEL ? (models.find((m) => m.name === TASK_MODEL) || (await processModel(modelConfig.parse(JSON5.parse(TASK_MODEL))).then((m) => addEndpoint(m) ))) ?? defaultModel : defaultModel; export type BackendModel = Optional< typeof defaultModel, "preprompt" | "parameters" | "multimodal" | "unlisted" >;
chat-ui/src/lib/server/models.ts/0
{ "file_path": "chat-ui/src/lib/server/models.ts", "repo_id": "chat-ui", "token_count": 2040 }
52
// Ideally shouldn't be needed, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850 import type { Conversation } from "./Conversation"; import type { Timestamps } from "./Timestamps"; export interface AbortedGeneration extends Timestamps { conversationId: Conversation["_id"]; }
chat-ui/src/lib/types/AbortedGeneration.ts/0
{ "file_path": "chat-ui/src/lib/types/AbortedGeneration.ts", "repo_id": "chat-ui", "token_count": 93 }
53
import type { ObjectId } from "mongodb"; import type { Conversation } from "./Conversation"; import type { Timestamps } from "./Timestamps"; export interface WebSearch extends Timestamps { _id?: ObjectId; convId?: Conversation["_id"]; prompt: string; searchQuery: string; results: WebSearchSource[]; context: string; contextSources: WebSearchSource[]; } export interface WebSearchSource { title: string; link: string; hostname: string; text?: string; // You.com provides text of webpage right away } export type WebSearchMessageSources = { type: "sources"; sources: WebSearchSource[]; }; export interface YouWebSearch { hits: YouSearchHit[]; latency: number; } interface YouSearchHit { url: string; title: string; description: string; snippets: string[]; } // eslint-disable-next-line no-shadow export enum WebSearchProvider { GOOGLE = "Google", YOU = "You.com", }
chat-ui/src/lib/types/WebSearch.ts/0
{ "file_path": "chat-ui/src/lib/types/WebSearch.ts", "repo_id": "chat-ui", "token_count": 293 }
54
export function sum(nums: number[]): number { return nums.reduce((a, b) => a + b, 0); }
chat-ui/src/lib/utils/sum.ts/0
{ "file_path": "chat-ui/src/lib/utils/sum.ts", "repo_id": "chat-ui", "token_count": 35 }
55
import { base } from "$app/paths"; import { ENABLE_ASSISTANTS } from "$env/static/private"; import { collections } from "$lib/server/database.js"; import type { Assistant } from "$lib/types/Assistant"; import { redirect } from "@sveltejs/kit"; export const load = async ({ url }) => { if (!ENABLE_ASSISTANTS) { throw redirect(302, `${base}/`); } const modelId = url.searchParams.get("modelId"); // fetch the top 10 assistants sorted by user count from biggest to smallest, filter out all assistants with only 1 user. Filter by model too if modelId is provided const assistants = await collections.assistants .find({ userCount: { $gt: 1 }, modelId: modelId ?? { $exists: true }, featured: true }) .sort({ userCount: -1 }) .limit(10) .toArray(); return { assistants: JSON.parse(JSON.stringify(assistants)) as Array<Assistant> }; };
chat-ui/src/routes/assistants/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/assistants/+page.server.ts", "repo_id": "chat-ui", "token_count": 267 }
56
import { dev } from "$app/environment"; import { base } from "$app/paths"; import { COOKIE_NAME } from "$env/static/private"; import { collections } from "$lib/server/database"; import { redirect } from "@sveltejs/kit"; export const actions = { async default({ cookies, locals }) { await collections.sessions.deleteOne({ sessionId: locals.sessionId }); cookies.delete(COOKIE_NAME, { path: "/", // So that it works inside the space's iframe sameSite: dev ? "lax" : "none", secure: !dev, httpOnly: true, }); throw redirect(303, `${base}/`); }, };
chat-ui/src/routes/logout/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/logout/+page.server.ts", "repo_id": "chat-ui", "token_count": 203 }
57
<script lang="ts"> import type { ActionData, PageData } from "./$types"; import AssistantSettings from "$lib/components/AssistantSettings.svelte"; export let data: PageData; export let form: ActionData; </script> <AssistantSettings bind:form models={data.models} />
chat-ui/src/routes/settings/assistants/new/+page.svelte/0
{ "file_path": "chat-ui/src/routes/settings/assistants/new/+page.svelte", "repo_id": "chat-ui", "token_count": 80 }
58
import adapter from "@sveltejs/adapter-node"; import { vitePreprocess } from "@sveltejs/kit/vite"; import dotenv from "dotenv"; dotenv.config({ path: "./.env.local" }); dotenv.config({ path: "./.env" }); process.env.PUBLIC_VERSION = process.env.npm_package_version; /** @type {import('@sveltejs/kit').Config} */ const config = { // Consult https://kit.svelte.dev/docs/integrations#preprocessors // for more information about preprocessors preprocess: vitePreprocess(), kit: { adapter: adapter(), paths: { base: process.env.APP_BASE || "", }, csrf: { // handled in hooks.server.ts, because we can have multiple valid origins checkOrigin: false, }, }, }; export default config;
chat-ui/svelte.config.js/0
{ "file_path": "chat-ui/svelte.config.js", "repo_id": "chat-ui", "token_count": 253 }
59
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration SPEED_TEST_N_EXAMPLES = 50_000 SMALL_TEST = 5_000 RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def read(dataset: datasets.Dataset, length): for i in range(length): _ = dataset[i] @get_duration def read_batch(dataset: datasets.Dataset, length, batch_size): for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_formatted(dataset: datasets.Dataset, length, type): with dataset.formatted_as(type=type): for i in range(length): _ = dataset[i] @get_duration def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type): with dataset.formatted_as(type=type): for i in range(0, length, batch_size): _ = dataset[i : i + batch_size] def benchmark_iterating(): times = {"num examples": SPEED_TEST_N_EXAMPLES} functions = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted, {"type": "pandas", "length": SMALL_TEST}), (read_formatted, {"type": "torch", "length": SMALL_TEST}), (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}), ] functions_shuffled = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("generating dataset") features = datasets.Features( {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")} ) dataset = generate_example_dataset( os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}, ) print("first set of iterations") for func, kwargs in functions: print(func.__name__, str(kwargs)) times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs) print("shuffling dataset") dataset = dataset.shuffle() print("Second set of iterations (after shuffling") for func, kwargs in functions_shuffled: print("shuffled ", func.__name__, str(kwargs)) times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func( dataset, **kwargs ) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
datasets/benchmarks/benchmark_iterating.py/0
{ "file_path": "datasets/benchmarks/benchmark_iterating.py", "repo_id": "datasets", "token_count": 1697 }
60
# Dataset features [`Features`] defines the internal structure of a dataset. It is used to specify the underlying serialization format. What's more interesting to you though is that [`Features`] contains high-level information about everything from the column names and types, to the [`ClassLabel`]. You can think of [`Features`] as the backbone of a dataset. The [`Features`] format is simple: `dict[column_name, column_type]`. It is a dictionary of column name and column type pairs. The column type provides a wide range of options for describing the type of data you have. Let's have a look at the features of the MRPC dataset from the GLUE benchmark: ```py >>> from datasets import load_dataset >>> dataset = load_dataset('glue', 'mrpc', split='train') >>> dataset.features {'idx': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None), 'sentence1': Value(dtype='string', id=None), 'sentence2': Value(dtype='string', id=None), } ``` The [`Value`] feature tells 🤗 Datasets: - The `idx` data type is `int32`. - The `sentence1` and `sentence2` data types are `string`. 🤗 Datasets supports many other data types such as `bool`, `float32` and `binary` to name just a few. <Tip> Refer to [`Value`] for a full list of supported data types. </Tip> The [`ClassLabel`] feature informs 🤗 Datasets the `label` column contains two classes. The classes are labeled `not_equivalent` and `equivalent`. Labels are stored as integers in the dataset. When you retrieve the labels, [`ClassLabel.int2str`] and [`ClassLabel.str2int`] carries out the conversion from integer value to label name, and vice versa. If your data type contains a list of objects, then you want to use the [`Sequence`] feature. Remember the SQuAD dataset? ```py >>> from datasets import load_dataset >>> dataset = load_dataset('squad', split='train') >>> dataset.features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` The `answers` field is constructed using the [`Sequence`] feature because it contains two subfields, `text` and `answer_start`, which are lists of `string` and `int32`, respectively. <Tip> See the [flatten](./process#flatten) section to learn how you can extract the nested subfields as their own independent columns. </Tip> The array feature type is useful for creating arrays of various sizes. You can create arrays with two dimensions using [`Array2D`], and even arrays with five dimensions using [`Array5D`]. ```py >>> features = Features({'a': Array2D(shape=(1, 3), dtype='int32')}) ``` The array type also allows the first dimension of the array to be dynamic. This is useful for handling sequences with variable lengths such as sentences, without having to pad or truncate the input to a uniform shape. ```py >>> features = Features({'a': Array3D(shape=(None, 5, 2), dtype='int32')}) ``` ## Audio feature Audio datasets have a column with type [`Audio`], which contains three important fields: * `array`: the decoded audio data represented as a 1-dimensional array. * `path`: the path to the downloaded audio file. * `sampling_rate`: the sampling rate of the audio data. 
When you load an audio dataset and call the audio column, the [`Audio`] feature automatically decodes and resamples the audio file: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` <Tip warning={true}> Index into an audio dataset using the row index first and then the `audio` column - `dataset[0]["audio"]` - to avoid decoding and resampling all the audio files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> With `decode=False`, the [`Audio`] type simply gives you the path or the bytes of the audio file, without decoding it into an `array`, ```py >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train").cast_column("audio", Audio(decode=False)) >>> dataset[0] {'audio': {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav'}, 'english_transcription': 'I would like to set up a joint account with my partner', 'intent_class': 11, 'lang_id': 4, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'transcription': 'I would like to set up a joint account with my partner'} ``` ## Image feature Image datasets have a column with type [`Image`], which loads `PIL.Image` objects from images stored as bytes: When you load an image dataset and call the image column, the [`Image`] feature automatically decodes the image file: ```py >>> from datasets import load_dataset, Image >>> dataset = load_dataset("beans", split="train") >>> dataset[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x125506CF8> ``` <Tip warning={true}> Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding all the image files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> With `decode=False`, the [`Image`] type simply gives you the path or the bytes of the image file, without decoding it into an `PIL.Image`, ```py >>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False)) >>> dataset[0]["image"] {'bytes': None, 'path': '/Users/username/.cache/huggingface/datasets/downloads/extracted/772e7c1fba622cff102b85dd74bcce46e8168634df4eaade7bedd3b8d91d3cd7/train/healthy/healthy_train.265.jpg'} ``` Depending on the dataset, you may get the path to the local downloaded image, or the content of the image as bytes if the dataset is not made of individual files. You can also define a dataset of images from numpy arrays: ```python >>> ds = Dataset.from_dict({"i": [np.zeros(shape=(16, 16, 3), dtype=np.uint8)]}, features=Features({"i": Image()})) ``` And in this case the numpy arrays are encoded into PNG (or TIFF if the pixels values precision is important). For multi-channels arrays like RGB or RGBA, only uint8 is supported. If you use a larger precision, you get a warning and the array is downcasted to uint8. 
For gray-scale images you can use the integer or float precision you want as long as it is compatible with `Pillow`. A warning is shown if your image integer or float precision is too high, and in this case the array is downcasted: an int64 array is downcasted to int32, and a float64 array is downcasted to float32.
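For instance, here is a minimal sketch mirroring the RGB example above; a float64 gray-scale array is expected to trigger a warning and be downcasted to float32 on encoding:

```python
>>> import numpy as np
>>> from datasets import Dataset, Features, Image
>>> # gray-scale pixels stored as float64: encoding downcasts them to float32 with a warning
>>> ds = Dataset.from_dict(
...     {"i": [np.zeros(shape=(16, 16), dtype=np.float64)]},
...     features=Features({"i": Image()}),
... )
```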
datasets/docs/source/about_dataset_features.mdx/0
{ "file_path": "datasets/docs/source/about_dataset_features.mdx", "repo_id": "datasets", "token_count": 2334 }
61
# Cloud storage 🤗 Datasets supports access to cloud storage providers through a `fsspec` FileSystem implementations. You can save and load datasets from any cloud storage in a Pythonic way. Take a look at the following table for some example of supported cloud storage providers: | Storage provider | Filesystem implementation | |----------------------|---------------------------------------------------------------| | Amazon S3 | [s3fs](https://s3fs.readthedocs.io/en/latest/) | | Google Cloud Storage | [gcsfs](https://gcsfs.readthedocs.io/en/latest/) | | Azure Blob/DataLake | [adlfs](https://github.com/fsspec/adlfs) | | Dropbox | [dropboxdrivefs](https://github.com/MarineChap/dropboxdrivefs)| | Google Drive | [gdrivefs](https://github.com/intake/gdrivefs) | | Oracle Cloud Storage | [ocifs](https://ocifs.readthedocs.io/en/latest/) | This guide will show you how to save and load datasets with any cloud storage. Here are examples for S3, Google Cloud Storage, Azure Blob Storage, and Oracle Cloud Object Storage. ## Set up your cloud storage FileSystem ### Amazon S3 1. Install the S3 FileSystem implementation: ``` >>> pip install s3fs ``` 2. Define your credentials To use an anonymous connection, use `anon=True`. Otherwise, include your `aws_access_key_id` and `aws_secret_access_key` whenever you are interacting with a private S3 bucket. ```py >>> storage_options = {"anon": True} # for anonymous connection # or use your credentials >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} # for private buckets # or use a botocore session >>> import aiobotocore.session >>> s3_session = aiobotocore.session.AioSession(profile="my_profile_name") >>> storage_options = {"session": s3_session} ``` 3. Create your FileSystem instance ```py >>> import s3fs >>> fs = s3fs.S3FileSystem(**storage_options) ``` ### Google Cloud Storage 1. Install the Google Cloud Storage implementation: ``` >>> conda install -c conda-forge gcsfs # or install with pip >>> pip install gcsfs ``` 2. Define your credentials ```py >>> storage_options={"token": "anon"} # for anonymous connection # or use your credentials of your default gcloud credentials or from the google metadata service >>> storage_options={"project": "my-google-project"} # or use your credentials from elsewhere, see the documentation at https://gcsfs.readthedocs.io/ >>> storage_options={"project": "my-google-project", "token": TOKEN} ``` 3. Create your FileSystem instance ```py >>> import gcsfs >>> fs = gcsfs.GCSFileSystem(**storage_options) ``` ### Azure Blob Storage 1. Install the Azure Blob Storage implementation: ``` >>> conda install -c conda-forge adlfs # or install with pip >>> pip install adlfs ``` 2. Define your credentials ```py >>> storage_options = {"anon": True} # for anonymous connection # or use your credentials >>> storage_options = {"account_name": ACCOUNT_NAME, "account_key": ACCOUNT_KEY} # gen 2 filesystem # or use your credentials with the gen 1 filesystem >>> storage_options={"tenant_id": TENANT_ID, "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET} ``` 3. Create your FileSystem instance ```py >>> import adlfs >>> fs = adlfs.AzureBlobFileSystem(**storage_options) ``` ### Oracle Cloud Object Storage 1. Install the OCI FileSystem implementation: ``` >>> pip install ocifs ``` 2. Define your credentials ```py >>> storage_options = {"config": "~/.oci/config", "region": "us-ashburn-1"} ``` 3. 
Create your FileSystem instance ```py >>> import ocifs >>> fs = ocifs.OCIFileSystem(**storage_options) ``` ## Load and Save your datasets using your cloud storage FileSystem ### Download and prepare a dataset into a cloud storage You can download and prepare a dataset into your cloud storage by specifying a remote `output_dir` in `download_and_prepare`. Don't forget to use the previously defined `storage_options` containing your credentials to write into a private cloud storage. The `download_and_prepare` method works in two steps: 1. it first downloads the raw data files (if any) in your local cache. You can set your cache directory by passing `cache_dir` to [`load_dataset_builder`] 2. then it generates the dataset in Arrow or Parquet format in your cloud storage by iterating over the raw data files. Load a dataset builder from the Hugging Face Hub (see [how to load from the Hugging Face Hub](./loading#hugging-face-hub)): ```py >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("imdb") >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` Load a dataset builder using a loading script (see [how to load a local loading script](./loading#local-loading-script)): ```py >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("path/to/local/loading_script/loading_script.py") >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` Use your own data files (see [how to load local and remote files](./loading#local-and-remote-files)): ```py >>> data_files = {"train": ["path/to/train.csv"]} >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("csv", data_files=data_files) >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` It is highly recommended to save the files as compressed Parquet files to optimize I/O by specifying `file_format="parquet"`. Otherwise the dataset is saved as an uncompressed Arrow file. You can also specify the size of the shards using `max_shard_size` (default is 500MB): ```py >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet", max_shard_size="1GB") ``` #### Dask Dask is a parallel computing library and it has a pandas-like API for working with larger than memory Parquet datasets in parallel. Dask can use multiple threads or processes on a single machine, or a cluster of machines to process data in parallel. Dask supports local data but also data from a cloud storage. Therefore you can load a dataset saved as sharded Parquet files in Dask with ```py import dask.dataframe as dd df = dd.read_parquet(output_dir, storage_options=storage_options) # or if your dataset is split into train/valid/test df_train = dd.read_parquet(output_dir + f"/{builder.name}-train-*.parquet", storage_options=storage_options) df_valid = dd.read_parquet(output_dir + f"/{builder.name}-validation-*.parquet", storage_options=storage_options) df_test = dd.read_parquet(output_dir + f"/{builder.name}-test-*.parquet", storage_options=storage_options) ``` You can find more about dask dataframes in their [documentation](https://docs.dask.org/en/stable/dataframe.html). 
## Saving serialized datasets After you have processed your dataset, you can save it to your cloud storage with [`Dataset.save_to_disk`]: ```py # saves encoded_dataset to amazon s3 >>> encoded_dataset.save_to_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options) # saves encoded_dataset to google cloud storage >>> encoded_dataset.save_to_disk("gcs://my-private-datasets/imdb/train", storage_options=storage_options) # saves encoded_dataset to microsoft azure blob/datalake >>> encoded_dataset.save_to_disk("adl://my-private-datasets/imdb/train", storage_options=storage_options) ``` <Tip> Remember to define your credentials in your [FileSystem instance](#set-up-your-cloud-storage-filesystem) `fs` whenever you are interacting with a private cloud storage. </Tip> ## Listing serialized datasets List files from a cloud storage with your FileSystem instance `fs`, using `fs.ls`: ```py >>> fs.ls("my-private-datasets/imdb/train", detail=False) ["dataset_info.json.json","dataset.arrow","state.json"] ``` ### Load serialized datasets When you are ready to use your dataset again, reload it with [`Dataset.load_from_disk`]: ```py >>> from datasets import load_from_disk # load encoded_dataset from cloud storage >>> dataset = load_from_disk("s3://a-public-datasets/imdb/train", storage_options=storage_options) >>> print(len(dataset)) 25000 ```
datasets/docs/source/filesystems.mdx/0
{ "file_path": "datasets/docs/source/filesystems.mdx", "repo_id": "datasets", "token_count": 2640 }
62
# Object detection Object detection models identify something in an image, and object detection datasets are used for applications such as autonomous driving and detecting natural hazards like wildfire. This guide will show you how to apply transformations to an object detection dataset following the [tutorial](https://albumentations.ai/docs/examples/example_bboxes/) from [Albumentations](https://albumentations.ai/docs/). To run these examples, make sure you have up-to-date versions of `albumentations` and `cv2` installed: ``` pip install -U albumentations opencv-python ``` In this example, you'll use the [`cppe-5`](https://huggingface.co/datasets/cppe-5) dataset for identifying medical personal protective equipment (PPE) in the context of the COVID-19 pandemic. Load the dataset and take a look at an example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("cppe-5") >>> example = ds['train'][0] >>> example {'height': 663, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x7FC3DC756250>, 'image_id': 15, 'objects': {'area': [3796, 1596, 152768, 81002], 'bbox': [[302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], 'category': [4, 4, 0, 0], 'id': [114, 115, 116, 117]}, 'width': 943} ``` The dataset has the following fields: - `image`: PIL.Image.Image object containing the image. - `image_id`: The image ID. - `height`: The image height. - `width`: The image width. - `objects`: A dictionary containing bounding box metadata for the objects in the image: - `id`: The annotation id. - `area`: The area of the bounding box. - `bbox`: The object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format). - `category`: The object's category, with possible values including `Coverall (0)`, `Face_Shield (1)`, `Gloves (2)`, `Goggles (3)` and `Mask (4)`. You can visualize the `bboxes` on the image using some internal torch utilities. To do that, you will need to reference the [`~datasets.ClassLabel`] feature associated with the category IDs so you can look up the string labels: ```py >>> import torch >>> from torchvision.ops import box_convert >>> from torchvision.utils import draw_bounding_boxes >>> from torchvision.transforms.functional import pil_to_tensor, to_pil_image >>> categories = ds['train'].features['objects'].feature['category'] >>> boxes_xywh = torch.tensor(example['objects']['bbox']) >>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy') >>> labels = [categories.int2str(x) for x in example['objects']['category']] >>> to_pil_image( ... draw_bounding_boxes( ... pil_to_tensor(example['image']), ... boxes_xyxy, ... colors="red", ... labels=labels, ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example.png"> </div> With `albumentations`, you can apply transforms that will affect the image while also updating the `bboxes` accordingly. In this case, the image is resized to (480, 480), flipped horizontally, and brightened. ```py >>> import albumentations >>> import numpy as np >>> transform = albumentations.Compose([ ... albumentations.Resize(480, 480), ... albumentations.HorizontalFlip(p=1.0), ... albumentations.RandomBrightnessContrast(p=1.0), ... ], bbox_params=albumentations.BboxParams(format='coco', label_fields=['category'])) >>> image = np.array(example['image']) >>> out = transform( ... image=image, ... 
bboxes=example['objects']['bbox'], ... category=example['objects']['category'], ... ) ``` Now when you visualize the result, the image should be flipped, but the `bboxes` should still be in the right places. ```py >>> image = torch.tensor(out['image']).permute(2, 0, 1) >>> boxes_xywh = torch.stack([torch.tensor(x) for x in out['bboxes']]) >>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy') >>> labels = [categories.int2str(x) for x in out['category']] >>> to_pil_image( ... draw_bounding_boxes( ... image, ... boxes_xyxy, ... colors='red', ... labels=labels ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example_transformed.png"> </div> Create a function to apply the transform to a batch of examples: ```py >>> def transforms(examples): ... images, bboxes, categories = [], [], [] ... for image, objects in zip(examples['image'], examples['objects']): ... image = np.array(image.convert("RGB")) ... out = transform( ... image=image, ... bboxes=objects['bbox'], ... category=objects['category'] ... ) ... images.append(torch.tensor(out['image']).permute(2, 0, 1)) ... bboxes.append(torch.tensor(out['bboxes'])) ... categories.append(out['category']) ... return {'image': images, 'bbox': bboxes, 'category': categories} ``` Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly which consumes less disk space. The randomness of data augmentation may return a different image if you access the same example twice. It is especially useful when training a model for several epochs. ```py >>> ds['train'].set_transform(transforms) ``` You can verify the transform works by visualizing the 10th example: ```py >>> example = ds['train'][10] >>> to_pil_image( ... draw_bounding_boxes( ... example['image'], ... box_convert(example['bbox'], 'xywh', 'xyxy'), ... colors='red', ... labels=[categories.int2str(x) for x in example['category']] ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example_transformed_2.png"> </div> <Tip> Now that you know how to process a dataset for object detection, learn [how to train an object detection model](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/YOLOS/Fine_tuning_YOLOS_for_object_detection_on_custom_dataset_(balloon).ipynb) and use it for inference. </Tip>
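Before moving on to training, note that you will usually also need a custom collate function when batching this dataset with PyTorch, because the number of boxes varies from image to image. The sketch below is illustrative only (the batch size and the choice to keep boxes and categories as lists are assumptions, not part of the dataset or the Albumentations tutorial):

```py
>>> from torch.utils.data import DataLoader

>>> def collate_fn(batch):
...     return {
...         "image": torch.stack([x["image"] for x in batch]),  # all images are 480x480 after the transform
...         "bbox": [x["bbox"] for x in batch],  # variable number of boxes per image
...         "category": [x["category"] for x in batch],
...     }

>>> dataloader = DataLoader(ds["train"], batch_size=4, collate_fn=collate_fn)
```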
datasets/docs/source/object_detection.mdx/0
{ "file_path": "datasets/docs/source/object_detection.mdx", "repo_id": "datasets", "token_count": 2299 }
63
# Share a dataset to the Hub The [Hub](https://huggingface.co/datasets) is home to an extensive collection of community-curated and popular research datasets. We encourage you to share your dataset to the Hub to help grow the ML community and accelerate progress for everyone. All contributions are welcome; adding a dataset is just a drag and drop away! Start by [creating a Hugging Face Hub account](https://huggingface.co/join) if you don't have one yet. ## Upload with the Hub UI The Hub's web-based interface allows users without any developer experience to upload a dataset. ### Create a repository A repository hosts all your dataset files, including the revision history, making storing more than one dataset version possible. 1. Click on your profile and select **New Dataset** to create a new dataset repository. 2. Pick a name for your dataset, and choose whether it is a public or private dataset. A public dataset is visible to anyone, whereas a private dataset can only be viewed by you or members of your organization. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/create_repo.png"/> </div> ### Upload dataset 1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to `.zip` or `.gz` file extension for example). Text file extensions are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/train.csv" download>train.csv</a>, <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/test.csv" download>test.csv</a>. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/upload_files.png"/> </div> 2. Drag and drop your dataset files and add a brief descriptive commit message. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/commit_files.png"/> </div> 3. After uploading your dataset files, they are stored in your dataset repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/files_stored.png"/> </div> ### Create a Dataset card Adding a Dataset card is super valuable for helping users find your dataset and understand how to use it responsibly. 1. Click on **Create Dataset Card** to create a Dataset card. This button creates a `README.md` file in your repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/dataset_card.png"/> </div> 2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card. 
You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which has a complete set of (but not required) tag options like `annotations_creators`, to help you choose the appropriate tags. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/metadata_ui.png"/> </div> 3. Click on the **Import dataset card template** link at the top of the editor to automatically create a dataset card template. Filling out the template is a great way to introduce your dataset to the community and help users understand how to use it. For a detailed example of what a good Dataset card should look like, take a look at the [CNN DailyMail Dataset card](https://huggingface.co/datasets/cnn_dailymail). ### Load dataset Once your dataset is stored on the Hub, anyone can load it with the [`load_dataset`] function: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") ``` ## Upload with Python Users who prefer to upload a dataset programmatically can use the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library. This library allows users to interact with the Hub from Python. 1. Begin by installing the library: ```bash pip install huggingface_hub ``` 2. To upload a dataset on the Hub in Python, you need to log in to your Hugging Face account: ```bash huggingface-cli login ``` 3. Use the [`push_to_hub()`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict.push_to_hub) function to help you add, commit, and push a file to your repository: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") # dataset = dataset.map(...) # do all your processing here >>> dataset.push_to_hub("stevhliu/processed_demo") ``` To set your dataset as private, set the `private` parameter to `True`. This parameter will only work if you are creating a repository for the first time. ```py >>> dataset.push_to_hub("stevhliu/private_processed_demo", private=True) ``` To add a new configuration (or subset) to a dataset or to add a new split (train/validation/test), please refer to the [`Dataset.push_to_hub`] documentation. ### Privacy A private dataset is only accessible by you. Similarly, if you share a dataset within your organization, then members of the organization can also access the dataset. Load a private dataset by providing your authentication token to the `token` parameter: ```py >>> from datasets import load_dataset # Load a private individual dataset >>> dataset = load_dataset("stevhliu/demo", token=True) # Load a private organization dataset >>> dataset = load_dataset("organization/dataset_name", token=True) ``` ## What's next? Congratulations, you've completed the tutorials! 🥳 From here, you can go on to: - Learn more about how to use 🤗 Datasets other functions to [process your dataset](process). - [Stream large datasets](stream) without downloading it locally. - [Define your dataset splits and configurations](repository_structure) or [loading script](dataset_script) and share your dataset with the community. If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
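As a compact recap of the Python workflow above, processing, uploading, and reloading a private dataset fits in a few lines (the repository names below are placeholders):

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("stevhliu/demo")
>>> dataset = dataset.map(lambda example: example)  # replace with your own processing
>>> dataset.push_to_hub("your-username/processed_demo", private=True)

# Later, reload the private dataset (requires `huggingface-cli login` or an access token)
>>> dataset = load_dataset("your-username/processed_demo", token=True)
```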
datasets/docs/source/upload_dataset.mdx/0
{ "file_path": "datasets/docs/source/upload_dataset.mdx", "repo_id": "datasets", "token_count": 2010 }
64
# Copyright 2021 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from cer import CER


cer = CER()


class TestCER(unittest.TestCase):
    def test_cer_case_sensitive(self):
        refs = ["White House"]
        preds = ["white house"]
        # S = 2, D = 0, I = 0, N = 11, CER = 2 / 11
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.1818181818) < 1e-6)

    def test_cer_whitespace(self):
        refs = ["were wolf"]
        preds = ["werewolf"]
        # S = 0, D = 0, I = 1, N = 9, CER = 1 / 9
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.1111111) < 1e-6)

        refs = ["werewolf"]
        preds = ["weae wolf"]
        # S = 1, D = 1, I = 0, N = 8, CER = 0.25
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.25) < 1e-6)

        # consecutive whitespaces case 1
        refs = ["were wolf"]
        preds = ["were wolf"]
        # S = 0, D = 0, I = 0, N = 9, CER = 0
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.0) < 1e-6)

        # consecutive whitespaces case 2
        refs = ["were wolf"]
        preds = ["were wolf"]
        # S = 0, D = 0, I = 0, N = 9, CER = 0
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.0) < 1e-6)

    def test_cer_sub(self):
        refs = ["werewolf"]
        preds = ["weaewolf"]
        # S = 1, D = 0, I = 0, N = 8, CER = 0.125
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)

    def test_cer_del(self):
        refs = ["werewolf"]
        preds = ["wereawolf"]
        # S = 0, D = 1, I = 0, N = 8, CER = 0.125
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)

    def test_cer_insert(self):
        refs = ["werewolf"]
        preds = ["wereolf"]
        # S = 0, D = 0, I = 1, N = 8, CER = 0.125
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.125) < 1e-6)

    def test_cer_equal(self):
        refs = ["werewolf"]
        char_error_rate = cer.compute(predictions=refs, references=refs)
        self.assertEqual(char_error_rate, 0.0)

    def test_cer_list_of_seqs(self):
        refs = ["werewolf", "I am your father"]
        char_error_rate = cer.compute(predictions=refs, references=refs)
        self.assertEqual(char_error_rate, 0.0)

        refs = ["werewolf", "I am your father", "doge"]
        preds = ["werxwolf", "I am your father", "doge"]
        # S = 1, D = 0, I = 0, N = 28, CER = 1 / 28
        char_error_rate = cer.compute(predictions=preds, references=refs)
        self.assertTrue(abs(char_error_rate - 0.03571428) < 1e-6)

    def test_correlated_sentences(self):
        refs = ["My hovercraft", "is full of eels"]
        preds = ["My hovercraft is full", " of eels"]
        # S = 0, D = 0, I = 2, N = 28, CER = 2 / 28
        # The whitespace at the front of " of eels" is stripped during preprocessing,
        # so two whitespaces need to be inserted.
        char_error_rate = cer.compute(predictions=preds, references=refs, concatenate_texts=True)
        self.assertTrue(abs(char_error_rate - 0.071428) < 1e-6)

    def test_cer_unicode(self):
refs = ["我能吞下玻璃而不伤身体"] preds = [" 能吞虾玻璃而 不霜身体啦"] # S = 3, D = 2, I = 0, N = 11, CER = 5 / 11 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.4545454545) < 1e-6) refs = ["我能吞下玻璃", "而不伤身体"] preds = ["我 能 吞 下 玻 璃", "而不伤身体"] # S = 0, D = 5, I = 0, N = 11, CER = 5 / 11 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.454545454545) < 1e-6) refs = ["我能吞下玻璃而不伤身体"] char_error_rate = cer.compute(predictions=refs, references=refs) self.assertFalse(char_error_rate, 0.0) def test_cer_empty(self): refs = [""] preds = ["Hypothesis"] with self.assertRaises(ValueError): cer.compute(predictions=preds, references=refs) if __name__ == "__main__": unittest.main()
datasets/metrics/cer/test_cer.py/0
{ "file_path": "datasets/metrics/cer/test_cer.py", "repo_id": "datasets", "token_count": 2407 }
65
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exact Match metric."""
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 """ _CITATION = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class ExactMatch(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), } ), reference_urls=[], ) def _compute( self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False, ): if regexes_to_ignore is not None: for s in regexes_to_ignore: predictions = np.array([re.sub(s, "", x) for x in predictions]) references = np.array([re.sub(s, "", x) for x in references]) else: predictions = np.asarray(predictions) references = np.asarray(references) if ignore_case: predictions = np.char.lower(predictions) references = np.char.lower(references) if ignore_punctuation: repl_table = string.punctuation.maketrans("", "", string.punctuation) predictions = np.char.translate(predictions, table=repl_table) references = np.char.translate(references, table=repl_table) if ignore_numbers: repl_table = string.digits.maketrans("", "", string.digits) predictions = np.char.translate(predictions, table=repl_table) references = np.char.translate(references, table=repl_table) score_list = predictions == references return {"exact_match": np.mean(score_list) * 100}
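# Illustrative sketch (not part of the original module): the punctuation and
# digit stripping in `_compute` relies on per-element string translation. The
# same idea in isolation, reusing the module-level imports above:
def _normalization_example():
    preds = np.array(["The cat?", "agent007"])
    # Delete every punctuation character, then every digit, element-wise.
    preds = np.char.translate(preds, table=string.punctuation.maketrans("", "", string.punctuation))
    preds = np.char.translate(preds, table=string.digits.maketrans("", "", string.digits))
    return preds  # -> ['The cat' 'agent']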
datasets/metrics/exact_match/exact_match.py/0
{ "file_path": "datasets/metrics/exact_match/exact_match.py", "repo_id": "datasets", "token_count": 2110 }
66
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Matthews Correlation metric.""" from sklearn.metrics import matthews_corrcoef import datasets _DESCRIPTION = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ _KWARGS_DESCRIPTION = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ _CITATION = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class MatthewsCorrelation(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), } ), reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" ], ) def _compute(self, predictions, references, sample_weight=None): return { "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)), }
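# Illustrative sketch (not part of the original module): `_compute` is a thin
# wrapper, so the same value can be reproduced with scikit-learn directly.
def _sklearn_equivalence_example():
    references = [1, 3, 2, 0, 3, 2]
    predictions = [1, 2, 2, 0, 3, 3]
    # Matches the first docstring example above (about 0.54).
    return float(matthews_corrcoef(references, predictions))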
datasets/metrics/matthews_correlation/matthews_correlation.py/0
{ "file_path": "datasets/metrics/matthews_correlation/matthews_correlation.py", "repo_id": "datasets", "token_count": 1736 }
67
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recall metric."""

from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""


_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
    - `0`: If there is a zero division, the return value is `0`.
    - `1`: If there is a zero division, the return value is `1`.

Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ _CITATION = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Recall(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), } ), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"], ) def _compute( self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ): score = recall_score( references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, ) return {"recall": float(score) if score.size == 1 else score}
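# Illustrative sketch (not part of the original module): the "multilabel"
# configuration declared in `_info` expects sequences of labels and an explicit
# `average`; the toy labels below are made up for illustration.
def _multilabel_example():
    recall_metric = datasets.load_metric("recall", "multilabel")
    references = [[1, 0, 1], [0, 1, 0]]
    predictions = [[1, 0, 0], [0, 1, 0]]
    # average="macro" takes the unweighted mean over the label columns (2/3 here).
    return recall_metric.compute(predictions=predictions, references=references, average="macro")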
datasets/metrics/recall/recall.py/0
{ "file_path": "datasets/metrics/recall/recall.py", "repo_id": "datasets", "token_count": 2604 }
68
# Metric Card for SQuAD v2 ## Metric description This metric wraps the official scoring script for version 2 of the [Stanford Question Answering Dataset (SQuAD)](https://huggingface.co/datasets/squad_v2). SQuAD is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. SQuAD 2.0 combines the 100,000 questions in SQuAD 1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. ## How to use The metric takes two files or two lists - one representing model predictions and the other the references to compare them to. *Predictions* : List of triple for question-answers to score with the following key-value pairs: * `'id'`: the question-answer identification field of the question and answer pair * `'prediction_text'` : the text of the answer * `'no_answer_probability'` : the probability that the question has no answer *References*: List of question-answers dictionaries with the following key-value pairs: * `'id'`: id of the question-answer pair (see above), * `'answers'`: a list of Dict {'text': text of the answer as a string} * `'no_answer_threshold'`: the probability threshold to decide that a question has no answer. ```python from datasets import load_metric squad_metric = load_metric("squad_v2") results = squad_metric.compute(predictions=predictions, references=references) ``` ## Output values This metric outputs a dictionary with 13 values: * `'exact'`: Exact match (the normalized answer exactly match the gold answer) (see the `exact_match` metric (forthcoming)) * `'f1'`: The average F1-score of predicted tokens versus the gold answer (see the [F1 score](https://huggingface.co/metrics/f1) metric) * `'total'`: Number of scores considered * `'HasAns_exact'`: Exact match (the normalized answer exactly match the gold answer) * `'HasAns_f1'`: The F-score of predicted tokens versus the gold answer * `'HasAns_total'`: How many of the questions have answers * `'NoAns_exact'`: Exact match (the normalized answer exactly match the gold answer) * `'NoAns_f1'`: The F-score of predicted tokens versus the gold answer * `'NoAns_total'`: How many of the questions have no answers * `'best_exact'` : Best exact match (with varying threshold) * `'best_exact_thresh'`: No-answer probability threshold associated to the best exact match * `'best_f1'`: Best F1 score (with varying threshold) * `'best_f1_thresh'`: No-answer probability threshold associated to the best F1 The range of `exact_match` is 0-100, where 0.0 means no answers were matched and 100.0 means all answers were matched. The range of `f1` is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall. The range of `total` depends on the length of predictions/references: its minimal value is 0, and maximal value is the total number of questions in the predictions and references. ### Values from popular papers The [SQuAD v2 paper](https://arxiv.org/pdf/1806.03822.pdf) reported an F1 score of 66.3% and an Exact Match score of 63.4%. 
They also report that human performance on the dataset represents an F1 score of 89.5% and an Exact Match score of 86.9%.

For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/squad).

## Examples

Maximal values for both exact match and F1 (perfect match):

```python
from datasets import load_metric
squad_v2_metric = load_metric("squad_v2")
predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}]
references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
results = squad_v2_metric.compute(predictions=predictions, references=references)
results
{'exact': 100.0, 'f1': 100.0, 'total': 1, 'HasAns_exact': 100.0, 'HasAns_f1': 100.0, 'HasAns_total': 1, 'best_exact': 100.0, 'best_exact_thresh': 0.0, 'best_f1': 100.0, 'best_f1_thresh': 0.0}
```

Minimal values for both exact match and F1 (no match):

```python
from datasets import load_metric
squad_v2_metric = load_metric("squad_v2")
predictions = [{'prediction_text': '1999', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}]
references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
results = squad_v2_metric.compute(predictions=predictions, references=references)
results
{'exact': 0.0, 'f1': 0.0, 'total': 1, 'HasAns_exact': 0.0, 'HasAns_f1': 0.0, 'HasAns_total': 1, 'best_exact': 0.0, 'best_exact_thresh': 0.0, 'best_f1': 0.0, 'best_f1_thresh': 0.0}
```

Partial match (2 out of 3 answers correct):

```python
from datasets import load_metric
squad_v2_metric = load_metric("squad_v2")
predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}, {'prediction_text': 'Beyonce', 'id': '56d2051ce7d4791d0090260b', 'no_answer_probability': 0.}, {'prediction_text': 'climate change', 'id': '5733b5344776f419006610e1', 'no_answer_probability': 0.}]
references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}, {'answers': {'answer_start': [233], 'text': ['Beyoncé and Bruno Mars']}, 'id': '56d2051ce7d4791d0090260b'}, {'answers': {'answer_start': [891], 'text': ['climate change']}, 'id': '5733b5344776f419006610e1'}]
results = squad_v2_metric.compute(predictions=predictions, references=references)
results
{'exact': 66.66666666666667, 'f1': 66.66666666666667, 'total': 3, 'HasAns_exact': 66.66666666666667, 'HasAns_f1': 66.66666666666667, 'HasAns_total': 3, 'best_exact': 66.66666666666667, 'best_exact_thresh': 0.0, 'best_f1': 66.66666666666667, 'best_f1_thresh': 0.0}
```

## Limitations and bias

This metric works only with datasets in the same format as the [SQuAD v.2 dataset](https://huggingface.co/datasets/squad_v2).

The SQuAD datasets do contain a certain amount of noise, such as duplicate questions as well as missing answers, but these represent a minority of the 100,000 question-answer pairs. Also, neither exact match nor F1 score reflects whether models do better on certain types of questions (e.g. who questions) or those that cover a certain gender or geographical area -- carrying out more in-depth error analysis can complement these numbers.
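The metric also scores unanswerable questions. In that case the reference carries empty `text` and `answer_start` lists, and the prediction should pair an empty `prediction_text` with a meaningful `no_answer_probability`. The sketch below is illustrative (the ID and probability are made up, and the empty-answer convention follows the official SQuAD v2 evaluation script):

```python
from datasets import load_metric
squad_v2_metric = load_metric("squad_v2")
predictions = [{'prediction_text': '', 'id': 'no-answer-001', 'no_answer_probability': 1.0}]
references = [{'answers': {'answer_start': [], 'text': []}, 'id': 'no-answer-001'}]
results = squad_v2_metric.compute(predictions=predictions, references=references)
# The NoAns_* fields now count this example; abstaining correctly is rewarded.
```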
## Citation ```bibtex @inproceedings{Rajpurkar2018SQuAD2, title={Know What You Don't Know: Unanswerable Questions for SQuAD}, author={Pranav Rajpurkar and Jian Zhang and Percy Liang}, booktitle={ACL 2018}, year={2018} } ``` ## Further References - [The Stanford Question Answering Dataset: Background, Challenges, Progress (blog post)](https://rajpurkar.github.io/mlx/qa-and-squad/) - [Hugging Face Course -- Question Answering](https://huggingface.co/course/chapter7/7)
datasets/metrics/squad_v2/README.md/0
{ "file_path": "datasets/metrics/squad_v2/README.md", "repo_id": "datasets", "token_count": 2372 }
69
<jupyter_start><jupyter_text>**⚠️ This notebook is deprecated in favor of the [Quickstart notebook](https://github.com/huggingface/notebooks/blob/main/datasets_doc/quickstart.ipynb)** HuggingFace 🤗 Datasets library - Quick overviewModels come and go (linear models, LSTM, Transformers, ...) but two core elements have consistently been the beating heart of Natural Language Processing: Datasets & Metrics🤗 Datasets is a fast and efficient library to easily share and load datasets, already providing access to the public datasets in the [Hugging Face Hub](https://huggingface.co/datasets).The library has several interesting features (besides easy access to datasets):- Build-in interoperability with PyTorch, Tensorflow 2, Pandas and Numpy- Lighweight and fast library with a transparent and pythonic API- Strive on large datasets: frees you from RAM memory limits, all datasets are memory-mapped on drive by default.- Smart caching with an intelligent `tf.data`-like cache: never wait for your data to process several times🤗 Datasets originated from a fork of the awesome Tensorflow-Datasets and the HuggingFace team want to deeply thank the team behind this amazing library and user API. We have tried to keep a layer of compatibility with `tfds` and can provide conversion from one format to the other.To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets. Main datasets APIThis notebook is a quick dive in the main user API for loading datasets in `datasets`<jupyter_code># install datasets !pip install datasets # Let's import the library. We typically only need at most two methods: from datasets import list_datasets, load_dataset from pprint import pprint<jupyter_output><empty_output><jupyter_text>Listing the currently available datasets<jupyter_code># Currently available datasets datasets = list_datasets() print(f"🤩 Currently {len(datasets)} datasets are available on the hub:") pprint(datasets[:100] + [f"{len(datasets) - 100} more..."], compact=True) # You can access various attributes of the datasets before downloading them squad_dataset = list_datasets(with_details=True)[datasets.index('squad')] pprint(squad_dataset.__dict__) # It's a simple python dataclass<jupyter_output>{'_id': '621ffdd236468d709f181f95', 'author': None, 'cardData': {'annotations_creators': ['crowdsourced'], 'dataset_info': {'config_name': 'plain_text', 'dataset_size': 89789763, 'download_size': 35142551, 'features': [{'dtype': 'string', 'name': 'id'}, {'dtype': 'string', 'name': 'title'}, {'dtype': 'string', 'name': 'context'}, {'dtype': 'string', 'name': 'question'}, {'name': 'answers', 'sequence': [{'dtype': 'string', 'name': 'text'}, [...]<jupyter_text>An example with SQuAD<jupyter_code># Downloading and loading a dataset dataset = load_dataset('squad', split='validation[:10%]')<jupyter_output>WARNING:datasets.builder:Found cached dataset squad (/root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453)<jupyter_text>This call to `datasets.load_dataset()` does the following steps under the hood:1. Download and import in the library the **SQuAD python processing script** from HuggingFace AWS bucket if it's not already stored in the library. You can find the SQuAD processing script [here](https://github.com/huggingface/datasets/tree/master/datasets/squad/squad.py) for instance. 
Processing scripts are small python scripts which define the info (citation, description) and format of the dataset and contain the URL to the original SQuAD JSON files and the code to load examples from the original SQuAD JSON files.2. Run the SQuAD python processing script which will: - **Download the SQuAD dataset** from the original URL (see the script) if it's not already downloaded and cached. - **Process and cache** all SQuAD in a structured Arrow table for each standard splits stored on the drive. Arrow table are arbitrarily long tables, typed with types that can be mapped to numpy/pandas/python standard types and can store nested objects. They can be directly access from drive, loaded in RAM or even streamed over the web. 3. Return a **dataset built from the splits** asked by the user (default: all); in the above example we create a dataset with the first 10% of the validation split.<jupyter_code># Informations on the dataset (description, citation, size, splits, format...) # are provided in `dataset.info` (a simple python dataclass) and also as direct attributes in the dataset object pprint(dataset.info.__dict__)<jupyter_output>{'builder_name': 'squad', 'citation': '@article{2016arXiv160605250R,\n' ' author = {{Rajpurkar}, Pranav and {Zhang}, Jian and ' '{Lopyrev},\n' ' Konstantin and {Liang}, Percy},\n' ' title = "{SQuAD: 100,000+ Questions for Machine ' 'Comprehension of Text}",\n' ' journal = {arXiv e-prints},\n' ' year = 2016,\n' ' eid = {arXiv:1606.05250},\n' ' pages = {arXiv:1606.05250},\n' 'archivePrefix = {arXiv},\n' ' eprint = {1606.05250},\n' '}\n', 'config_name': 'plain_text', 'dataset_size': 89819092, 'description': 'Stanford Question Answering Dataset (SQuAD) is a reading ' 'comprehension dataset, consisting of questions posed by ' 'crowdworkers on a set of Wikipedia articles, where the answer ' 'to every question is a segment of[...]<jupyter_text>Inspecting and using the dataset: elements, slices and columns The returned `Dataset` object is a memory mapped dataset that behaves similarly to a normal map-style dataset. 
It is backed by an Apache Arrow table which allows many interesting features.<jupyter_code>print(dataset)<jupyter_output>Dataset({ features: ['id', 'title', 'context', 'question', 'answers'], num_rows: 1057 })<jupyter_text>You can query it's length and get items or slices like you would do normally with a python mapping.<jupyter_code>print(f"👉 Dataset len(dataset): {len(dataset)}") print("\n👉 First item 'dataset[0]':") pprint(dataset[0]) # Or get slices with several examples: print("\n👉Slice of the two items 'dataset[10:12]':") pprint(dataset[10:12]) # You can get a full column of the dataset by indexing with its name as a string: print(dataset['question'][:10])<jupyter_output>['Which NFL team represented the AFC at Super Bowl 50?', 'Which NFL team represented the NFC at Super Bowl 50?', 'Where did Super Bowl 50 take place?', 'Which NFL team won Super Bowl 50?', 'What color was used to emphasize the 50th anniversary of the Super Bowl?', 'What was the theme of Super Bowl 50?', 'What day was the game played on?', 'What is the AFC short for?', 'What was the theme of Super Bowl 50?', 'What does AFC stand for?']<jupyter_text>The `__getitem__` method will return different format depending on the type of query:- Items like `dataset[0]` are returned as dict of elements.- Slices like `dataset[10:20]` are returned as dict of lists of elements.- Columns like `dataset['question']` are returned as a list of elements.This may seems surprising at first but in our experiments it's actually a lot easier to use for data processing than returning the same format for each of these views on the dataset. In particular, you can easily iterate along columns in slices, and also naturally permute consecutive indexings with identical results as showed here by permuting column indexing with elements and slices:<jupyter_code>print(dataset[0]['question'] == dataset['question'][0]) print(dataset[10:20]['context'] == dataset['context'][10:20])<jupyter_output>True True<jupyter_text>Dataset are internally typed and structuredThe dataset is backed by one (or several) Apache Arrow tables which are typed and allows for fast retrieval and access as well as arbitrary-size memory mapping.This means respectively that the format for the dataset is clearly defined and that you can load datasets of arbitrary size without worrying about RAM memory limitation (basically the dataset take no space in RAM, it's directly read from drive when needed with fast IO access).<jupyter_code># You can inspect the dataset column names and types print("Column names:") pprint(dataset.column_names) print("Features:") pprint(dataset.features)<jupyter_output>Column names: ['id', 'title', 'context', 'question', 'answers'] Features: {'answers': Sequence(feature={'answer_start': Value(dtype='int32', id=None), 'text': Value(dtype='string', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)}<jupyter_text>Additional misc properties<jupyter_code># Datasets also have shapes informations print("The number of rows", dataset.num_rows, "also available as len(dataset)", len(dataset)) print("The number of columns", dataset.num_columns) print("The shape (rows, columns)", dataset.shape)<jupyter_output>The number of rows 1057 also available as len(dataset) 1057 The number of columns 5 The shape (rows, columns) (1057, 5)<jupyter_text>Modifying the dataset with `dataset.map`Now that we know how to inspect our dataset we also 
want to update it. For that there is a powerful method `.map()` which is inspired by `tf.data` map method and that you can use to apply a function to each examples, independently or in batch.`.map()` takes a callable accepting a dict as argument (same dict as the one returned by `dataset[i]`) and iterate over the dataset by calling the function on each example.<jupyter_code># Let's print the length of each `context` string in our subset of the dataset # (10% of the validation i.e. 1057 examples) dataset.map(lambda example: print(len(example['context']), end=','))<jupyter_output><empty_output><jupyter_text>This is basically the same as doing```pythonfor example in dataset: function(example)``` The above examples was a bit verbose. We can control the logging level of 🤗 Datasets with it's logging module:<jupyter_code>from datasets import logging logging.set_verbosity_warning() dataset.map(lambda example: print(len(example['context']), end=',')) # Let's keep it verbose for our tutorial though from datasets import logging logging.set_verbosity_info()<jupyter_output><empty_output><jupyter_text>The above example had no effect on the dataset because the method we supplied to `.map()` didn't return a `dict` or a `abc.Mapping` that could be used to update the examples in the dataset.In such a case, `.map()` will return the same dataset (`self`).Now let's see how we can use a method that actually modify the dataset. Modifying the dataset example by example The main interest of `.map()` is to update and modify the content of the table and leverage smart caching and fast backend.To use `.map()` to update elements in the table you need to provide a function with the following signature: `function(example: dict) -> dict`.<jupyter_code># Let's add a prefix 'My cute title: ' to each of our titles def add_prefix_to_title(example): example['title'] = 'My cute title: ' + example['title'] return example prefixed_dataset = dataset.map(add_prefix_to_title) print(prefixed_dataset.unique('title')) # `.unique()` is a super fast way to print the unique elemnts in a column (see the doc for all the methods)<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-242ccd893f32bdf9.arrow<jupyter_text>This call to `.map()` compute and return the updated table. It will also store the updated table in a cache file indexed by the current state and the mapped function.A subsequent call to `.map()` (even in another python session) will reuse the cached file instead of recomputing the operation.You can test this by running again the previous cell, you will see that the result are directly loaded from the cache and not re-computed again.The updated dataset returned by `.map()` is (again) directly memory mapped from drive and not allocated in RAM. The function you provide to `.map()` should accept an input with the format of an item of the dataset: `function(dataset[0])` and return a python dict.The columns and type of the outputs can be different than the input dict. 
In this case the new keys will be added as additional columns in the dataset.Bascially each dataset example dict is updated with the dictionary returned by the function like this: `example.update(function(example))`.<jupyter_code># Since the input example dict is updated with our function output dict, # we can actually just return the updated 'title' field titled_dataset = dataset.map(lambda example: {'title': 'My cutest title: ' + example['title']}) print(titled_dataset.unique('title'))<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-4f3eee21db868c87.arrow<jupyter_text>Removing columnsYou can also remove columns when running map with the `remove_columns=List[str]` argument.<jupyter_code># This will remove the 'title' column while doing the update (after having send it the the mapped function so you can use it in your function!) less_columns_dataset = dataset.map(lambda example: {'new_title': 'Wouhahh: ' + example['title']}, remove_columns=['title']) print(less_columns_dataset.column_names) print(less_columns_dataset.unique('new_title'))<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-2800c1727354fbe2.arrow<jupyter_text>Using examples indicesWith `with_indices=True`, dataset indices (from `0` to `len(dataset)`) will be supplied to the function which must thus have the following signature: `function(example: dict, indice: int) -> dict`<jupyter_code># This will add the index in the dataset to the 'question' field with_indices_dataset = dataset.map(lambda example, idx: {'question': f'{idx}: ' + example['question']}, with_indices=True) pprint(with_indices_dataset['question'][:5])<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-e23b98819de39aea.arrow<jupyter_text>Modifying the dataset with batched updates `.map()` can also work with batch of examples (slices of the dataset).This is particularly interesting if you have a function that can handle batch of inputs like the tokenizers of HuggingFace `tokenizers`.To work on batched inputs set `batched=True` when calling `.map()` and supply a function with the following signature: `function(examples: Dict[List]) -> Dict[List]` or, if you use indices, `function(examples: Dict[List], indices: List[int]) -> Dict[List]`).Bascially, your function should accept an input with the format of a slice of the dataset: `function(dataset[:10])`.<jupyter_code>!pip install transformers # Let's import a fast tokenizer that can work on batched inputs # (the 'Fast' tokenizers in HuggingFace) from transformers import BertTokenizerFast, logging as transformers_logging transformers_logging.set_verbosity_warning() tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') # Now let's batch tokenize our dataset 'context' encoded_dataset = dataset.map(lambda example: tokenizer(example['context']), batched=True) print("encoded_dataset[0]") pprint(encoded_dataset[0], compact=True) # we have added additional columns pprint(encoded_dataset.column_names) # Let show a more complex processing with the full preparation of the SQuAD dataset # for training a model from Transformers def 
convert_to_features(batch): # Tokenize contexts and questions (as pairs of inputs) encodings = tokenizer(batch['context'], batch['question'], truncation=True) # Compute start and end tokens for labels start_positions, end_positions = [], [] for i, answer in enumerate(batch['answers']): first_char = answer['answer_start'][0] last_char = first_char + len(answer['text'][0]) - 1 start_positions.append(encodings.char_to_token(i, first_char)) end_positions.append(encodings.char_to_token(i, last_char)) encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings encoded_dataset = dataset.map(convert_to_features, batched=True) # Now our dataset comprise the labels for the start and end position # as well as the offsets for converting back tokens # in span of the original string for evaluation print("column_names", encoded_dataset.column_names) print("start_positions", encoded_dataset[:5]['start_positions'])<jupyter_output>column_names ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] start_positions [34, 45, 80, 34, 98]<jupyter_text>Image datasets Images are loaded using Pillow:<jupyter_code>image_dataset = load_dataset("cats_vs_dogs", split="train") image_dataset[0] image_dataset[0]["image"]<jupyter_output><empty_output><jupyter_text>Audio datasets Audio files are decoded using torchaudio or librosa using to the sampling rate of your choice.To read mp3 files you need ffmpeg and restart your runtime<jupyter_code>!add-apt-repository -y ppa:jonathonf/ffmpeg-4 && apt update && apt install -y ffmpeg from datasets import load_dataset audio_dataset = load_dataset("common_voice", "fi", split="train") audio_dataset[0] audio_dataset[0]["audio"]["array"], audio_dataset[0]["audio"]["sampling_rate"]<jupyter_output><empty_output><jupyter_text>Audio decoding and resampling is done in-the-fly when accessing examples. You can change the sampling rate this way:<jupyter_code>from datasets import Audio audio_dataset = audio_dataset.cast_column("audio", Audio(sampling_rate=16_000)) audio_dataset[0]["audio"]["array"], audio_dataset[0]["audio"]["sampling_rate"]<jupyter_output><empty_output><jupyter_text>Formatting outputs for PyTorch, Tensorflow, Numpy, PandasNow that we have tokenized our inputs, we probably want to use this dataset in a `torch.Dataloader` or a `tf.data.Dataset`. There are various ways to approach this.Using the `set_format()` method, we can:- format the indexing (`__getitem__`) to return numpy/pytorch/tensorflow tensors, instead of python objects, and- format the indexing (`__getitem__`) to return only the subset of the columns that we need for our model inputs. We don't want the columns `id` or `title` as inputs to train our model, but we could still want to keep them in the dataset, for instance for the evaluation of the model. 
This is handled by the `.set_format(type: Union[None, str], columns: Union[None, str, List[str]])` where:- `type` define the return type for our dataset `__getitem__` method and is one of `[None, 'numpy', 'pandas', 'torch', 'tensorflow']` (`None` means return python objects), and- `columns` define the columns returned by `__getitem__` and takes the name of a column in the dataset or a list of columns to return (`None` means return all columns).<jupyter_code>columns_to_return = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] # Uncomment whichever one is appropriate for you # encoded_dataset.set_format(type='torch', columns=columns_to_return) encoded_dataset.set_format(type='tensorflow', columns=columns_to_return) # Our dataset indexing output is now ready for being used in a pytorch dataloader pprint(encoded_dataset[1], compact=True) # Note that the columns are not removed from the dataset, just not returned when calling __getitem__ # Similarly the inner type of the dataset is not changed to torch.Tensor, the conversion and filtering is done on-the-fly when querying the dataset print(encoded_dataset.column_names) # We can remove the formatting with `.reset_format()` # or, identically, a call to `.set_format()` with no arguments encoded_dataset.reset_format() pprint(encoded_dataset[1], compact=True) # The current format can be checked with `.format`, # which is a dict of the type and formatting pprint(encoded_dataset.format)<jupyter_output>{'columns': ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None}<jupyter_text>There is also a convenience method, `to_tf_dataset()`, for the creation of `tf.data.Dataset` objects directly from a HuggingFace `Dataset`. An example will be shown below - when using this method, it is sufficient to pass the `columns` argument and your `DataCollator` - make sure you set the `return_tensors` argument of your `DataCollator` to `tf` or `np`, though, because TensorFlow won't be happy if you start passing it PyTorch Tensors! Wrapping this all upLet's wrap this all up with the full code to load and prepare SQuAD for training a PyTorch or TensorFlow model from HuggingFace `transformers` library.<jupyter_code>!pip install transformers import torch from datasets import load_dataset from transformers import BertTokenizerFast # Load our training dataset and tokenizer dataset = load_dataset('squad') tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') def get_correct_alignement(context, answer): """ Some original examples in SQuAD have indices wrong by 1 or 2 character. We test and fix this here. 
""" gold_text = answer['text'][0] start_idx = answer['answer_start'][0] end_idx = start_idx + len(gold_text) if context[start_idx:end_idx] == gold_text: return start_idx, end_idx # When the gold label position is good elif context[start_idx-1:end_idx-1] == gold_text: return start_idx-1, end_idx-1 # When the gold label is off by one character elif context[start_idx-2:end_idx-2] == gold_text: return start_idx-2, end_idx-2 # When the gold label is off by two character else: raise ValueError() # Tokenize our training dataset def convert_to_features(example_batch): # Tokenize contexts and questions (as pairs of inputs) encodings = tokenizer(example_batch['context'], example_batch['question'], truncation=True) # Compute start and end tokens for labels using Transformers's fast tokenizers alignement methods. start_positions, end_positions = [], [] for i, (context, answer) in enumerate(zip(example_batch['context'], example_batch['answers'])): start_idx, end_idx = get_correct_alignement(context, answer) start_positions.append(encodings.char_to_token(i, start_idx)) end_positions.append(encodings.char_to_token(i, end_idx-1)) encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings encoded_dataset = dataset.map(convert_to_features, batched=True)<jupyter_output>INFO:datasets.builder:No config specified, defaulting to the single config: squad/plain_text INFO:datasets.info:Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/squad/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453 INFO:datasets.builder:Overwrite dataset info from restored data version if exists. INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453 WARNING:datasets.builder:Found cached dataset squad (/root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453) INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453<jupyter_text>That's the end of the shared preprocessing! Next, for Torch, we set our dataset format and create a `dataloader`. If you're using TensorFlow, skip to the next block.<jupyter_code># Format our dataset to outputs torch.Tensor to train a pytorch model columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] encoded_dataset.set_format(type='torch', columns=columns) # Instantiate a PyTorch Dataloader around our dataset # Let's do dynamic batching (pad on the fly with our own collate_fn) def collate_fn(examples): return tokenizer.pad(examples, return_tensors='pt') dataloader = torch.utils.data.DataLoader(encoded_dataset['train'], collate_fn=collate_fn, batch_size=8)<jupyter_output><empty_output><jupyter_text>For TensorFlow, we use the `to_tf_dataset()` method to get a `tf.data.Dataset`.<jupyter_code>columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] # Let's do dynamic batching (pad on the fly with our own collate_fn) def collate_fn(examples): return tokenizer.pad(examples, return_tensors='np') # to_tf_dataset() returns a tf.data.Dataset that we can pass straight to model.fit(). encoded_tf_dataset = encoded_dataset['train'].to_tf_dataset( columns=columns, collate_fn=collate_fn, batch_size=8, shuffle=True, )<jupyter_output>You're using a BertTokenizerFast tokenizer. 
Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.<jupyter_text>Next, we initialize our model. The next two blocks show model creation and training in Torch. For TensorFlow, skip ahead!<jupyter_code># Let's load a pretrained Bert model and a simple optimizer from transformers import AutoModelForQuestionAnswering model = AutoModelForQuestionAnswering.from_pretrained('bert-base-cased', return_dict=True) optimizer = torch.optim.Adam(model.parameters(), lr=1e-5) # Now let's train our model device = 'cuda' if torch.cuda.is_available() else 'cpu' model.train().to(device) for i, batch in enumerate(dataloader): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() model.zero_grad() print(f'Step {i} - loss: {loss:.3}') if i > 5: break<jupyter_output>Step 0 - loss: 5.65 Step 1 - loss: 5.63 Step 2 - loss: 5.18 Step 3 - loss: 5.6 Step 4 - loss: 5.29 Step 5 - loss: 5.51 Step 6 - loss: 5.49<jupyter_text>Next, we'll initialize and train our TensorFlow model. Note the lack of a loss argument when we `compile()` our model here! All Transformers models support computing loss internally. When no loss argument is provided, the model will use its internal loss - this is especially helpful for cases like QA models, when the loss can be quite complex.<jupyter_code># Let's load a pretrained Bert model and a simple optimizer from transformers import TFAutoModelForQuestionAnswering import tensorflow as tf model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-cased') # No loss argument! model.compile(optimizer=tf.keras.optimizers.Adam(1e-5))<jupyter_output>All model checkpoint layers were used when initializing TFBertForQuestionAnswering. Some layers of TFBertForQuestionAnswering were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['qa_outputs'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.<jupyter_text>Now that all the preprocessing is done, training is an extremely comforting single line of Keras. 
We stop training early with the `steps_per_epoch` argument - you should probably leave that one out of your actual production code!<jupyter_code>model.fit(encoded_tf_dataset, epochs=1, steps_per_epoch=3)<jupyter_output>3/3 [==============================] - 73s 927ms/step - loss: 5.5575<jupyter_text>Example with a NER metric: `seqeval`<jupyter_code>!pip install evaluate seqeval import evaluate ner_metric = evaluate.load('seqeval') references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] ner_metric.compute(predictions=predictions, references=references)<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Requirement already satisfied: evaluate in /usr/local/lib/python3.10/dist-packages (0.4.0) Requirement already satisfied: seqeval in /usr/local/lib/python3.10/dist-packages (1.2.2) Requirement already satisfied: datasets>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.12.0) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.22.4) Requirement already satisfied: dill in /usr/local/lib/python3.10/dist-packages (from evaluate) (0.3.6) Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.5.3) Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.27.1) Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from evaluate) (4.65.0) Requirement already satisfied: xxhash in /usr/local/lib/py[...]
datasets/notebooks/Overview.ipynb/0
{ "file_path": "datasets/notebooks/Overview.ipynb", "repo_id": "datasets", "token_count": 10406 }
70
import logging import os from argparse import ArgumentParser from pathlib import Path from shutil import copyfile, rmtree from typing import Generator import datasets.config from datasets.builder import DatasetBuilder from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_manager import DownloadMode from datasets.load import dataset_module_factory, import_main_class from datasets.utils.info_utils import VerificationMode from datasets.utils.logging import ERROR, get_logger logger = get_logger(__name__) def _test_command_factory(args): return TestCommand( args.dataset, args.name, args.cache_dir, args.data_dir, args.all_configs, args.save_info or args.save_infos, args.ignore_verifications, args.force_redownload, args.clear_cache, ) class TestCommand(BaseDatasetsCLICommand): __test__ = False # to tell pytest it's not a test class @staticmethod def register_subcommand(parser: ArgumentParser): test_parser = parser.add_parser("test", help="Test dataset implementation.") test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name") test_parser.add_argument( "--cache_dir", type=str, default=None, help="Cache directory where the datasets are stored.", ) test_parser.add_argument( "--data_dir", type=str, default=None, help="Can be used to specify a manual directory to get the files from.", ) test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") test_parser.add_argument( "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)" ) test_parser.add_argument( "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks.", ) test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") test_parser.add_argument( "--clear_cache", action="store_true", help="Remove downloaded files and cached datasets after each config test", ) # aliases test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info") test_parser.add_argument("dataset", type=str, help="Name of the dataset to download") test_parser.set_defaults(func=_test_command_factory) def __init__( self, dataset: str, name: str, cache_dir: str, data_dir: str, all_configs: bool, save_infos: bool, ignore_verifications: bool, force_redownload: bool, clear_cache: bool, ): self._dataset = dataset self._name = name self._cache_dir = cache_dir self._data_dir = data_dir self._all_configs = all_configs self._save_infos = save_infos self._ignore_verifications = ignore_verifications self._force_redownload = force_redownload self._clear_cache = clear_cache if clear_cache and not cache_dir: print( "When --clear_cache is used, specifying a cache directory is mandatory.\n" "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n" "Please provide a --cache_dir that will be used to test the dataset script." 
) exit(1) if save_infos: self._ignore_verifications = True def run(self): logging.getLogger("filelock").setLevel(ERROR) if self._name is not None and self._all_configs: print("Both parameters `config` and `all_configs` can't be used at once.") exit(1) path, config_name = self._dataset, self._name module = dataset_module_factory(path) builder_cls = import_main_class(module.module_path) n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1 def get_builders() -> Generator[DatasetBuilder, None, None]: if self._all_configs and builder_cls.BUILDER_CONFIGS: for i, config in enumerate(builder_cls.BUILDER_CONFIGS): if "config_name" in module.builder_kwargs: yield builder_cls( cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: yield builder_cls( config_name=config.name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: if "config_name" in module.builder_kwargs: yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs) else: yield builder_cls( config_name=config_name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) for j, builder in enumerate(get_builders()): print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})") builder._record_infos = os.path.exists( os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME) ) # record checksums only if we need to update a (deprecated) dataset_infos.json builder.download_and_prepare( download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS if not self._force_redownload else DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS if self._ignore_verifications else VerificationMode.ALL_CHECKS, try_from_hf_gcs=False, ) builder.as_dataset() if self._save_infos: builder._save_infos() # If save_infos=True, the dataset card (README.md) is created next to the loaded module file. # The dataset_infos are saved in the YAML part of the README.md # Let's move it to the original directory of the dataset script, to allow the user to # upload them on S3 at the same time afterwards. if self._save_infos: dataset_readme_path = os.path.join( builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME ) name = Path(path).name + ".py" combined_path = os.path.join(path, name) if os.path.isfile(path): dataset_dir = os.path.dirname(path) elif os.path.isfile(combined_path): dataset_dir = path elif os.path.isdir(path): # for local directories containing only data files dataset_dir = path else: # in case of a remote dataset dataset_dir = None print(f"Dataset card saved at {dataset_readme_path}") # Move dataset_info back to the user if dataset_dir is not None: user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME) copyfile(dataset_readme_path, user_dataset_readme_path) print(f"Dataset card saved at {user_dataset_readme_path}") # If clear_cache=True, the download folder and the dataset builder cache directory are deleted if self._clear_cache: if os.path.isdir(builder._cache_dir): logger.warning(f"Clearing cache at {builder._cache_dir}") rmtree(builder._cache_dir) download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR) if os.path.isdir(download_dir): logger.warning(f"Clearing cache at {download_dir}") rmtree(download_dir) print("Test successful.")
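# Illustrative usage sketch (hypothetical, not part of the original module): how this
# command class can be driven directly in Python, mirroring
# `datasets-cli test <dataset> --all_configs --save_info`. The dataset path below is a
# placeholder and must point to a real dataset script or data folder for the run to succeed.
if __name__ == "__main__":  # pragma: no cover
    command = TestCommand(
        dataset="path/to/my_dataset",  # hypothetical dataset script or directory
        name=None,
        cache_dir=None,
        data_dir=None,
        all_configs=True,
        save_infos=True,
        ignore_verifications=False,
        force_redownload=False,
        clear_cache=False,
    )
    command.run()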
datasets/src/datasets/commands/test.py/0
{ "file_path": "datasets/src/datasets/commands/test.py", "repo_id": "datasets", "token_count": 4096 }
71
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from fsspec.implementations.local import LocalFileSystem from ..utils.deprecation_utils import deprecated from . import compression _has_s3fs = importlib.util.find_spec("s3fs") is not None if _has_s3fs: from .s3filesystem import S3FileSystem # noqa: F401 COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [ compression.Bz2FileSystem, compression.GzipFileSystem, compression.Lz4FileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) @deprecated( "This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead." ) def extract_path_from_uri(dataset_path: str) -> str: """ Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`). Args: dataset_path (`str`): Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory. """ if "://" in dataset_path: dataset_path = dataset_path.split("://")[1] return dataset_path def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool: """ Checks if `fs` is a remote filesystem. Args: fs (`fsspec.spec.AbstractFileSystem`): An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`]. """ return not isinstance(fs, LocalFileSystem) def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str): """ Renames the file `src` in `fs` to `dst`. """ if not is_remote_filesystem(fs): # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst)) else: fs.mv(src, dst, recursive=True) def _reset_fsspec_lock() -> None: """ Clear reference to the loop and thread. This is necessary otherwise HTTPFileSystem hangs in the ML training loop. Only required for fsspec >= 0.9.0 See https://github.com/fsspec/gcsfs/issues/379 """ if hasattr(fsspec.asyn, "reset_lock"): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: fsspec.asyn.iothread[0] = None fsspec.asyn.loop[0] = None fsspec.asyn.lock = threading.Lock()
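# Illustrative usage sketch (hypothetical, not part of the original module), showing how
# the helpers above are typically combined. The local filesystem case is used because it
# needs no extra dependencies; with s3fs installed, `fsspec.filesystem("s3")` would take
# the remote branch instead. The S3 URI below is a placeholder.
if __name__ == "__main__":  # pragma: no cover
    local_fs = fsspec.filesystem("file")
    print(is_remote_filesystem(local_fs))  # False: local renames go through shutil.move
    # For a remote URI, the inner path can be recovered with the (deprecated) helper:
    print(extract_path_from_uri("s3://my-bucket/dataset/train"))  # "my-bucket/dataset/train"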
datasets/src/datasets/filesystems/__init__.py/0
{ "file_path": "datasets/src/datasets/filesystems/__init__.py", "repo_id": "datasets", "token_count": 1096 }
72
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import tqdm as hf_tqdm from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class JsonDatasetReader(AbstractDatasetReader): def __init__( self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs, ): super().__init__( path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) self.field = field path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} self.builder = Json( cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, # try_from_hf_gcs=try_from_hf_gcs, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset class JsonDatasetWriter: def __init__( self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ): if num_proc is not None and num_proc <= 0: raise ValueError(f"num_proc {num_proc} must be an integer > 0.") self.dataset = dataset self.path_or_buf = path_or_buf self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE self.num_proc = num_proc self.encoding = "utf-8" self.to_json_kwargs = to_json_kwargs def write(self) -> int: _ = self.to_json_kwargs.pop("path_or_buf", None) orient = self.to_json_kwargs.pop("orient", "records") lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False) if "index" not in self.to_json_kwargs and orient in ["split", "table"]: self.to_json_kwargs["index"] = False compression = self.to_json_kwargs.pop("compression", None) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f"`datasets` currently does not support {compression} compression") if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer: written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs) else: if compression: raise NotImplementedError( f"The compression parameter is not supported when writing to a buffer, but compression={compression}" " was passed. Please provide a local path instead." 
) written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs) return written def _batch_json(self, args): offset, orient, lines, to_json_kwargs = args batch = query_table( table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, ) json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs) if not json_str.endswith("\n"): json_str += "\n" return json_str.encode(self.encoding) def _write( self, file_obj: BinaryIO, orient, lines, **to_json_kwargs, ) -> int: """Writes the pyarrow table as JSON lines to a binary file handle. Caller is responsible for opening and closing the handle. """ written = 0 if self.num_proc is None or self.num_proc == 1: for offset in hf_tqdm( range(0, len(self.dataset), self.batch_size), unit="ba", desc="Creating json from Arrow format", ): json_str = self._batch_json((offset, orient, lines, to_json_kwargs)) written += file_obj.write(json_str) else: num_rows, batch_size = len(self.dataset), self.batch_size with multiprocessing.Pool(self.num_proc) as pool: for json_str in hf_tqdm( pool.imap( self._batch_json, [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", desc="Creating json from Arrow format", ): written += file_obj.write(json_str) return written
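# Illustrative usage sketch (hypothetical, not part of the original module). These classes
# are normally reached through `load_dataset("json", ...)` and `Dataset.to_json(...)`; the
# direct calls below are a hedged round-trip example that writes to a placeholder local path.
if __name__ == "__main__":  # pragma: no cover
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["foo", "bar"], "label": [0, 1]})
    num_bytes = JsonDatasetWriter(ds, "example.jsonl", lines=True).write()
    print(f"wrote {num_bytes} bytes")
    reloaded = JsonDatasetReader("example.jsonl").read()
    print(reloaded)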
datasets/src/datasets/io/json.py/0
{ "file_path": "datasets/src/datasets/io/json.py", "repo_id": "datasets", "token_count": 2940 }
73
import glob import os import shutil import time from pathlib import Path from typing import List, Optional, Tuple import pyarrow as pa import datasets import datasets.config from datasets.naming import filenames_for_dataset_split logger = datasets.utils.logging.get_logger(__name__) def _get_modification_time(cached_directory_path): return (Path(cached_directory_path)).stat().st_mtime def _find_hash_in_cache( dataset_name: str, config_name: Optional[str], cache_dir: Optional[str] ) -> Tuple[str, str, str]: cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE)) cached_datasets_directory_path_root = os.path.join(cache_dir, dataset_name.replace("/", "___")) cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob( os.path.join(cached_datasets_directory_path_root, config_name or "*", "*", "*") ) if os.path.isdir(cached_directory_path) ] if not cached_directory_paths: if config_name is not None: cached_directory_paths = [ cached_directory_path for cached_directory_path in glob.glob( os.path.join(cached_datasets_directory_path_root, "*", "*", "*") ) if os.path.isdir(cached_directory_path) ] available_configs = sorted( {Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths} ) raise ValueError( f"Couldn't find cache for {dataset_name}" + (f" for config '{config_name}'" if config_name else "") + (f"\nAvailable configs in the cache: {available_configs}" if available_configs else "") ) # get most recent cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1]) version, hash = cached_directory_path.parts[-2:] other_configs = [ Path(cached_directory_path).parts[-3] for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, "*", version, hash)) if os.path.isdir(cached_directory_path) ] if not config_name and len(other_configs) > 1: raise ValueError( f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}" f"\nPlease specify which configuration to reload from the cache, e.g." f"\n\tload_dataset('{dataset_name}', '{other_configs[0]}')" ) config_name = cached_directory_path.parts[-3] warning_msg = ( f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} " f"(last modified on {time.ctime(_get_modification_time(cached_directory_path))})." 
) logger.warning(warning_msg) return config_name, version, hash class Cache(datasets.ArrowBasedBuilder): def __init__( self, cache_dir: Optional[str] = None, dataset_name: Optional[str] = None, config_name: Optional[str] = None, version: Optional[str] = "0.0.0", hash: Optional[str] = None, repo_id: Optional[str] = None, **kwargs, ): if repo_id is None and dataset_name is None: raise ValueError("repo_id or dataset_name is required for the Cache dataset builder") if hash == "auto" and version == "auto": config_name, version, hash = _find_hash_in_cache( dataset_name=repo_id or dataset_name, config_name=config_name, cache_dir=cache_dir, ) elif hash == "auto" or version == "auto": raise NotImplementedError("Pass both hash='auto' and version='auto' instead") super().__init__( cache_dir=cache_dir, dataset_name=dataset_name, config_name=config_name, version=version, hash=hash, repo_id=repo_id, **kwargs, ) def _info(self) -> datasets.DatasetInfo: return datasets.DatasetInfo() def download_and_prepare(self, output_dir: Optional[str] = None, *args, **kwargs): if not os.path.exists(self.cache_dir): raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}") if output_dir is not None and output_dir != self.cache_dir: shutil.copytree(self.cache_dir, output_dir) def _split_generators(self, dl_manager): # used to stream from cache if isinstance(self.info.splits, datasets.SplitDict): split_infos: List[datasets.SplitInfo] = list(self.info.splits.values()) else: raise ValueError(f"Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}") return [ datasets.SplitGenerator( name=split_info.name, gen_kwargs={ "files": filenames_for_dataset_split( self.cache_dir, dataset_name=self.dataset_name, split=split_info.name, filetype_suffix="arrow", shard_lengths=split_info.shard_lengths, ) }, ) for split_info in split_infos ] def _generate_tables(self, files): # used to stream from cache for file_idx, file in enumerate(files): with open(file, "rb") as f: try: for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)): pa_table = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"{file_idx}_{batch_idx}", pa_table except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
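# Illustrative usage note (hypothetical, not part of the original module): this builder is
# what `load_dataset` falls back to when reloading an already-prepared dataset from the
# local cache, e.g.
#
#   builder = Cache(dataset_name="squad", version="auto", hash="auto")  # "squad" must already be cached locally
#   builder.download_and_prepare()
#   ds = builder.as_dataset(split="train")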
datasets/src/datasets/packaged_modules/cache/cache.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/cache/cache.py", "repo_id": "datasets", "token_count": 2938 }
74
import shutil
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int logger = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class SparkConfig(datasets.BuilderConfig): """BuilderConfig for Spark.""" features: Optional[datasets.Features] = None def _reorder_dataframe_by_partition(df: "pyspark.sql.DataFrame", new_partition_order: List[int]): df_combined = df.select("*").where(f"part_id = {new_partition_order[0]}") for partition_id in new_partition_order[1:]: partition_df = df.select("*").where(f"part_id = {partition_id}") df_combined = df_combined.union(partition_df) return df_combined def _generate_iterable_examples( df: "pyspark.sql.DataFrame", partition_order: List[int], ): import pyspark def generate_fn(): df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id")) partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order) row_id = 0 # pipeline next partition in parallel to hide latency rows = partition_df.toLocalIterator(prefetchPartitions=True) curr_partition = -1 for row in rows: row_as_dict = row.asDict() part_id = row_as_dict["part_id"] row_as_dict.pop("part_id") if curr_partition != part_id: curr_partition = part_id row_id = 0 yield f"{part_id}_{row_id}", row_as_dict row_id += 1 return generate_fn class SparkExamplesIterable(_BaseExamplesIterable): def __init__( self, df: "pyspark.sql.DataFrame", partition_order=None, ): self.df = df self.partition_order = partition_order or range(self.df.rdd.getNumPartitions()) self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order) def __iter__(self): yield from self.generate_examples_fn() def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable": partition_order = list(range(self.df.rdd.getNumPartitions())) generator.shuffle(partition_order) return SparkExamplesIterable(self.df, partition_order=partition_order) def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable": partition_order = self.split_shard_indices_by_worker(worker_id, num_workers) return SparkExamplesIterable(self.df, partition_order=partition_order) @property def n_shards(self) -> int: return len(self.partition_order) class Spark(datasets.DatasetBuilder): BUILDER_CONFIG_CLASS = SparkConfig def __init__( self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs, ): import pyspark self._spark = pyspark.sql.SparkSession.builder.getOrCreate() self.df = df self._working_dir = working_dir super().__init__( cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, ) def _validate_cache_dir(self): # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a pickling # error due to pickling the SparkContext. cache_dir = self._cache_dir # Returns the path of the created file. def create_cache_and_write_probe(context): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(cache_dir, exist_ok=True) probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(probe_file, "a") return [probe_file] if self._spark.conf.get("spark.master", "").startswith("local"): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: probe = ( self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() ) if os.path.isfile(probe[0]): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] def _repartition_df_if_needed(self, max_shard_size): import pyspark def get_arrow_batch_size(it): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]}) df_num_rows = self.df.count() sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. approx_bytes_per_row = ( self.df.limit(sample_num_rows) .repartition(1) .mapInArrow(get_arrow_batch_size, "batch_bytes: long") .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes")) .collect()[0] .sample_bytes / sample_num_rows ) approx_total_size = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) self.df = self.df.repartition(new_num_partitions) def _prepare_split_single( self, fpath: str, file_format: str, max_shard_size: int, ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: import pyspark writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath embed_local_files = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. features = self.config.features writer_batch_size = self._writer_batch_size storage_options = self._fs.storage_options def write_arrow(it): # Within the same SparkContext, no two task attempts will share the same attempt ID. task_id = pyspark.TaskContext().taskAttemptId() first_batch = next(it, None) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"], ) shard_id = 0 writer = writer_class( features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([first_batch]) writer.write_table(table) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) shard_id += 1 writer = writer_class( features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files, ) table = pa.Table.from_batches([batch]) writer.write_table(table) if writer._num_bytes > 0: num_examples, num_bytes = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"], ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(working_fpath)): dest = os.path.join(os.path.dirname(fpath), os.path.basename(file)) shutil.move(file, dest) stats = ( self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long") .groupBy("task_id") .agg( pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"), ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def _prepare_split( self, split_generator: "datasets.SplitGenerator", file_format: str = "arrow", max_shard_size: Optional[Union[str, int]] = None, num_proc: Optional[int] = None, **kwargs, ): self._validate_cache_dir() max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE) self._repartition_df_if_needed(max_shard_size) is_local = not is_remote_filesystem(self._fs) path_join = os.path.join if is_local else posixpath.join SUFFIX = "-TTTTT-SSSSS-of-NNNNN" fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}" fpath = path_join(self._output_dir, fname) total_num_examples = 0 total_num_bytes = 0 total_shards = 0 task_id_and_num_shards = [] all_shard_lengths = [] for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size): ( num_examples, num_bytes, num_shards, shard_lengths, ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards)) all_shard_lengths.extend(shard_lengths) split_generator.split_info.num_examples = total_num_examples split_generator.split_info.num_bytes = total_num_bytes # should rename everything at the end logger.debug(f"Renaming {total_shards} shards.") if total_shards > 1: split_generator.split_info.shard_lengths = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
fs = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( task_id: int, shard_id: int, global_shard_id: int, ): rename( fs, fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"), ) args = [] global_shard_id = 0 for i in range(len(task_id_and_num_shards)): task_id, num_shards = task_id_and_num_shards[i] for shard_id in range(num_shards): args.append([task_id, shard_id, global_shard_id]) global_shard_id += 1 self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() else: # don't use any pattern shard_id = 0 task_id = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), fpath.replace(SUFFIX, ""), ) def _get_examples_iterable_for_split( self, split_generator: "datasets.SplitGenerator", ) -> SparkExamplesIterable: return SparkExamplesIterable(self.df)
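# Illustrative usage sketch (hypothetical, not part of the original module). This builder
# backs `Dataset.from_spark`; the tiny local-mode DataFrame below is a stand-in for a real
# Spark workload.
if __name__ == "__main__":  # pragma: no cover
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([(1, "foo"), (2, "bar")], ["id", "text"])
    ds = Dataset.from_spark(df)  # routes through the Spark builder defined above
    print(ds)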
datasets/src/datasets/packaged_modules/spark/spark.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/spark/spark.py", "repo_id": "datasets", "token_count": 6664 }
75
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=True) class AutomaticSpeechRecognition(TaskTemplate): task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"audio": Audio()}) label_schema: ClassVar[Features] = Features({"transcription": Value("string")}) audio_column: str = "audio" transcription_column: str = "transcription" def align_with_features(self, features): if self.audio_column not in features: raise ValueError(f"Column {self.audio_column} is not present in features.") if not isinstance(features[self.audio_column], Audio): raise ValueError(f"Column {self.audio_column} is not an Audio type.") task_template = copy.deepcopy(self) input_schema = self.input_schema.copy() input_schema["audio"] = features[self.audio_column] task_template.__dict__["input_schema"] = input_schema return task_template @property def column_mapping(self) -> Dict[str, str]: return {self.audio_column: "audio", self.transcription_column: "transcription"}
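# Illustrative usage sketch (hypothetical, not part of the original module): aligning this
# template with a dataset whose transcription column has a different name. The dataset
# identifier and column name below are placeholders.
#
#   from datasets import load_dataset
#   ds = load_dataset("common_voice", "fi", split="train")
#   task = AutomaticSpeechRecognition(audio_column="audio", transcription_column="sentence")
#   ds = ds.prepare_for_task(task)  # renames/casts columns to the {"audio", "transcription"} schema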
datasets/src/datasets/tasks/automatic_speech_recognition.py/0
{ "file_path": "datasets/src/datasets/tasks/automatic_speech_recognition.py", "repo_id": "datasets", "token_count": 459 }
76
import bz2 import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from ._filelock import FileLock from .logging import get_logger logger = get_logger(__name__) class ExtractManager: def __init__(self, cache_dir: Optional[str] = None): self.extract_dir = ( os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH ) self.extractor = Extractor def _get_output_path(self, path: str) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" abs_path = os.path.abspath(path) return os.path.join(self.extract_dir, hash_url_to_filename(abs_path)) def _do_extract(self, output_path: str, force_extract: bool) -> bool: return force_extract or ( not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path)) ) def extract(self, input_path: str, force_extract: bool = False) -> str: extractor_format = self.extractor.infer_extractor_format(input_path) if not extractor_format: return input_path output_path = self._get_output_path(input_path) if self._do_extract(output_path, force_extract): self.extractor.extract(input_path, output_path, extractor_format) return output_path class BaseExtractor(ABC): @classmethod @abstractmethod def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: ... @staticmethod @abstractmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: ... class MagicNumberBaseExtractor(BaseExtractor, ABC): magic_numbers: List[bytes] = [] @staticmethod def read_magic_number(path: Union[Path, str], magic_number_length: int): with open(path, "rb") as f: return f.read(magic_number_length) @classmethod def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool: if not magic_number: magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers) try: magic_number = cls.read_magic_number(path, magic_number_length) except OSError: return False return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers) class TarExtractor(BaseExtractor): @classmethod def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool: return tarfile.is_tarfile(path) @staticmethod def safemembers(members, output_path): """ Fix for CVE-2007-4559 Desc: Directory traversal vulnerability in the (1) extract and (2) extractall functions in the tarfile module in Python allows user-assisted remote attackers to overwrite arbitrary files via a .. (dot dot) sequence in filenames in a TAR archive, a related issue to CVE-2001-1267. 
See: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-4559 From: https://stackoverflow.com/a/10077309 """ def resolved(path: str) -> str: return os.path.realpath(os.path.abspath(path)) def badpath(path: str, base: str) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(base, path)).startswith(base) def badlink(info, base: str) -> bool: # Links are interpreted relative to the directory containing the link tip = resolved(os.path.join(base, os.path.dirname(info.name))) return badpath(info.linkname, base=tip) base = resolved(output_path) for finfo in members: if badpath(finfo.name, base): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)") elif finfo.issym() and badlink(finfo, base): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}") elif finfo.islnk() and badlink(finfo, base): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}") else: yield finfo @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: os.makedirs(output_path, exist_ok=True) tar_file = tarfile.open(input_path) tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path)) tar_file.close() class GzipExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\x1F\x8B"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: with gzip.open(input_path, "rb") as gzip_file: with open(output_path, "wb") as extracted_file: shutil.copyfileobj(gzip_file, extracted_file) class ZipExtractor(MagicNumberBaseExtractor): magic_numbers = [ b"PK\x03\x04", b"PK\x05\x06", # empty archive b"PK\x07\x08", # spanned archive ] @classmethod def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool: if super().is_extractable(path, magic_number=magic_number): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(path, "rb") as fp: endrec = _EndRecData(fp) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET]) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: data = fp.read(sizeCentralDir) # CD is where we expect it to be if len(data) == sizeCentralDir: centdir = struct.unpack(structCentralDir, data) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: os.makedirs(output_path, exist_ok=True) with zipfile.ZipFile(input_path, "r") as zip_file: zip_file.extractall(output_path) zip_file.close() class XzExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: with lzma.open(input_path) as compressed_file: with open(output_path, "wb") as extracted_file: shutil.copyfileobj(compressed_file, extracted_file) class RarExtractor(MagicNumberBaseExtractor): magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("Please pip install rarfile") import rarfile os.makedirs(output_path, exist_ok=True) rf = rarfile.RarFile(input_path) rf.extractall(output_path) rf.close() class ZstdExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\x28\xb5\x2F\xFD"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("Please pip install zstandard") import zstandard as zstd dctx = zstd.ZstdDecompressor() with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh: dctx.copy_stream(ifh, ofh) class Bzip2Extractor(MagicNumberBaseExtractor): magic_numbers = [b"\x42\x5A\x68"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: with bz2.open(input_path, "rb") as compressed_file: with open(output_path, "wb") as extracted_file: shutil.copyfileobj(compressed_file, extracted_file) class SevenZipExtractor(MagicNumberBaseExtractor): magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("Please pip install py7zr") import py7zr os.makedirs(output_path, exist_ok=True) with py7zr.SevenZipFile(input_path, "r") as archive: archive.extractall(output_path) class Lz4Extractor(MagicNumberBaseExtractor): magic_numbers = [b"\x04\x22\x4D\x18"] @staticmethod def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None: if not config.LZ4_AVAILABLE: raise ImportError("Please pip install lz4") import lz4.frame with lz4.frame.open(input_path, "rb") as compressed_file: with 
open(output_path, "wb") as extracted_file: shutil.copyfileobj(compressed_file, extracted_file) class Extractor: # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) extractors: Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": Bzip2Extractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": Lz4Extractor, # <Added version="2.4.0"/> } @classmethod def _get_magic_number_max_length(cls): return max( len(extractor_magic_number) for extractor in cls.extractors.values() if issubclass(extractor, MagicNumberBaseExtractor) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def _read_magic_number(path: Union[Path, str], magic_number_length: int): try: return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length) except OSError: return b"" @classmethod def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool: warnings.warn( "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'infer_extractor_format' instead.", category=FutureWarning, ) extractor_format = cls.infer_extractor_format(path) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def infer_extractor_format(cls, path: Union[Path, str]) -> str: # <Added version="2.4.0"/> magic_number_max_length = cls._get_magic_number_max_length() magic_number = cls._read_magic_number(path, magic_number_max_length) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(path, magic_number=magic_number): return extractor_format @classmethod def extract( cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, # <Added version="2.4.0"/> extractor: Optional[BaseExtractor] = "deprecated", ) -> None: os.makedirs(os.path.dirname(output_path), exist_ok=True) # Prevent parallel extractions lock_path = str(Path(output_path).with_suffix(".lock")) with FileLock(lock_path): shutil.rmtree(output_path, ignore_errors=True) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(extractor_format, str): # passed as positional arg warnings.warn( "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. " "Use 'extractor_format' instead.", category=FutureWarning, ) extractor = extractor if extractor != "deprecated" else extractor_format else: extractor = cls.extractors[extractor_format] return extractor.extract(input_path, output_path) else: warnings.warn( "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an " "exception in 3.0.0.", category=FutureWarning, ) for extractor in cls.extractors.values(): if extractor.is_extractable(input_path): return extractor.extract(input_path, output_path)
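# Illustrative usage sketch (hypothetical, not part of the original module) of the two-step
# API above. The archive path is a placeholder and must exist for the calls to succeed;
# `infer_extractor_format` returns None for files that are not recognized archives.
if __name__ == "__main__":  # pragma: no cover
    archive_path = "path/to/archive.tar.gz"  # hypothetical input archive
    extractor_format = Extractor.infer_extractor_format(archive_path)
    if extractor_format:
        Extractor.extract(archive_path, "extracted_archive", extractor_format=extractor_format)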
datasets/src/datasets/utils/extract.py/0
{ "file_path": "datasets/src/datasets/utils/extract.py", "repo_id": "datasets", "token_count": 6410 }
77
from typing import List import numpy as np def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int: """Return the number of possible shards according to the input gen_kwargs""" # Having lists of different sizes makes sharding ambigious, raise an error in this case # until we decide how to define sharding without ambiguity for users lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)} if len(set(lists_lengths.values())) > 1: raise RuntimeError( ( "Sharding is ambiguous for this dataset: " + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n" + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items()) + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, " + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length." ) ) max_length = max(lists_lengths.values(), default=0) return max(1, max_length) def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]: """ Get the range of shard indices per job. If num_shards<max_num_jobs, then num_shards jobs are given a range of one shard. The shards indices order is preserved: e.g. all the first shards are given the first job. Moreover all the jobs are given approximately the same number of shards. Example: ```python >>> _distribute_shards(2, max_num_jobs=4) [range(0, 1), range(1, 2)] >>> _distribute_shards(10, max_num_jobs=3) [range(0, 4), range(4, 7), range(7, 10)] ``` """ shards_indices_per_group = [] for group_idx in range(max_num_jobs): num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0 shard_indices = range(start, start + num_shards_to_add) shards_indices_per_group.append(shard_indices) return shards_indices_per_group def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]: """Split the gen_kwargs into `max_num_job` gen_kwargs""" # Having lists of different sizes makes sharding ambigious, raise an error in this case num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs) if num_shards == 1: return [dict(gen_kwargs)] else: shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(value, list) else value for key, value in gen_kwargs.items() } for group_idx in range(len(shard_indices_per_group)) ] def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict: return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key], list) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict: """Return a shuffled copy of the input gen_kwargs""" # We must shuffle all the lists, and lists of the same size must have the same shuffling. # This way entangled lists of (shard, shard_metadata) are still in the right order. 
# First, let's generate the shuffled indices per list size list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)} indices_per_size = {} for size in list_sizes: indices_per_size[size] = list(range(size)) rng.shuffle(indices_per_size[size]) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes shuffled_kwargs = dict(gen_kwargs) for key, value in shuffled_kwargs.items(): if isinstance(value, list): shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]] return shuffled_kwargs
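# Illustrative usage sketch (hypothetical, not part of the original module) of how the
# helpers above compose: split gen_kwargs across jobs, then shuffle the shards of one job.
if __name__ == "__main__":  # pragma: no cover
    gen_kwargs = {"files": [f"shard_{i}.jsonl" for i in range(10)], "metadata": "fixed"}
    per_job = _split_gen_kwargs(gen_kwargs, max_num_jobs=3)
    print([len(job["files"]) for job in per_job])  # [4, 3, 3]
    rng = np.random.default_rng(seed=42)
    print(_shuffle_gen_kwargs(rng, per_job[0])["files"])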
datasets/src/datasets/utils/sharding.py/0
{ "file_path": "datasets/src/datasets/utils/sharding.py", "repo_id": "datasets", "token_count": 1742 }
78
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _TestCommandArgs = namedtuple( "_TestCommandArgs", [ "dataset", "name", "cache_dir", "data_dir", "all_configs", "save_infos", "ignore_verifications", "force_redownload", "clear_cache", ], defaults=[None, None, None, False, False, False, False, False], ) def is_1percent_close(source, target): return (abs(source - target) / target) < 0.01 @pytest.mark.integration def test_test_command(dataset_loading_script_dir): args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True) test_command = TestCommand(*args) test_command.run() dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md") assert os.path.exists(dataset_readme_path) dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir) expected_dataset_infos = DatasetInfosDict( { "default": DatasetInfo( features=Features( { "tokens": Sequence(Value("string")), "ner_tags": Sequence( ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]) ), "langs": Sequence(Value("string")), "spans": Sequence(Value("string")), } ), splits=[ { "name": "train", "num_bytes": 2351563, "num_examples": 10000, }, { "name": "validation", "num_bytes": 238418, "num_examples": 1000, }, ], download_size=3940680, dataset_size=2589981, ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key) if key == "num_bytes": assert is_1percent_close(result, expected) elif key == "splits": assert list(result) == list(expected) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes) else: result == expected
datasets/tests/commands/test_test.py/0
{ "file_path": "datasets/tests/commands/test_test.py", "repo_id": "datasets", "token_count": 1496 }
79
import contextlib import csv import json import os import sqlite3 import tarfile import textwrap import zipfile import pandas as pd import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config # dataset + arrow_file @pytest.fixture(scope="session") def dataset(): n = 10 features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])), "answers": datasets.Sequence( { "text": datasets.Value("string"), "answer_start": datasets.Value("int32"), } ), "id": datasets.Value("int64"), } ) dataset = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(n)), }, features=features, ) return dataset @pytest.fixture(scope="session") def arrow_file(tmp_path_factory, dataset): filename = str(tmp_path_factory.mktemp("data") / "file.arrow") dataset.map(cache_file_name=filename) return filename # FILE_CONTENT + files FILE_CONTENT = """\ Text data. Second line of data.""" @pytest.fixture(scope="session") def text_file(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.txt" data = FILE_CONTENT with open(filename, "w") as f: f.write(data) return filename @pytest.fixture(scope="session") def bz2_file(tmp_path_factory): import bz2 path = tmp_path_factory.mktemp("data") / "file.txt.bz2" data = bytes(FILE_CONTENT, "utf-8") with bz2.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def gz_file(tmp_path_factory): import gzip path = str(tmp_path_factory.mktemp("data") / "file.txt.gz") data = bytes(FILE_CONTENT, "utf-8") with gzip.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def lz4_file(tmp_path_factory): if datasets.config.LZ4_AVAILABLE: import lz4.frame path = tmp_path_factory.mktemp("data") / "file.txt.lz4" data = bytes(FILE_CONTENT, "utf-8") with lz4.frame.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def seven_zip_file(tmp_path_factory, text_file): if datasets.config.PY7ZR_AVAILABLE: import py7zr path = tmp_path_factory.mktemp("data") / "file.txt.7z" with py7zr.SevenZipFile(path, "w") as archive: archive.write(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def tar_file(tmp_path_factory, text_file): import tarfile path = tmp_path_factory.mktemp("data") / "file.txt.tar" with tarfile.TarFile(path, "w") as f: f.add(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def xz_file(tmp_path_factory): import lzma path = tmp_path_factory.mktemp("data") / "file.txt.xz" data = bytes(FILE_CONTENT, "utf-8") with lzma.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def zip_file(tmp_path_factory, text_file): import zipfile path = tmp_path_factory.mktemp("data") / "file.txt.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def zstd_file(tmp_path_factory): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd path = tmp_path_factory.mktemp("data") / "file.txt.zst" data = bytes(FILE_CONTENT, "utf-8") with zstd.open(path, "wb") as f: f.write(data) return path # xml_file @pytest.fixture(scope="session") def xml_file(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.xml" data = textwrap.dedent( """\ <?xml version="1.0" 
encoding="UTF-8" ?> <tmx version="1.4"> <header segtype="sentence" srclang="ca" /> <body> <tu> <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv> <tuv xml:lang="en"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv> <tuv xml:lang="en"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv> <tuv xml:lang="en"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv> <tuv xml:lang="en"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv> <tuv xml:lang="en"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(filename, "w") as f: f.write(data) return filename DATA = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] DATA2 = [ {"col_1": "4", "col_2": 4, "col_3": 4.0}, {"col_1": "5", "col_2": 5, "col_3": 5.0}, ] DATA_DICT_OF_LISTS = { "col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0], } DATA_312 = [ {"col_3": 0.0, "col_1": "0", "col_2": 0}, {"col_3": 1.0, "col_1": "1", "col_2": 1}, ] DATA_STR = [ {"col_1": "s0", "col_2": 0, "col_3": 0.0}, {"col_1": "s1", "col_2": 1, "col_3": 1.0}, {"col_1": "s2", "col_2": 2, "col_3": 2.0}, {"col_1": "s3", "col_2": 3, "col_3": 3.0}, ] @pytest.fixture(scope="session") def dataset_dict(): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session") def arrow_path(tmp_path_factory): dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS) path = str(tmp_path_factory.mktemp("data") / "dataset.arrow") dataset.map(cache_file_name=path) return path @pytest.fixture(scope="session") def sqlite_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite") with contextlib.closing(sqlite3.connect(path)) as con: cur = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)") for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values())) con.commit() return path @pytest.fixture(scope="session") def csv_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.csv") with open(path, "w", newline="") as f: writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) writer.writeheader() for item in DATA: writer.writerow(item) return path @pytest.fixture(scope="session") def csv2_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset2.csv") with open(path, "w", newline="") as f: writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) writer.writeheader() for item in DATA: writer.writerow(item) return path @pytest.fixture(scope="session") def bz2_csv_path(csv_path, tmp_path_factory): import bz2 path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2" with open(csv_path, "rb") as f: data = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bz2.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def zip_csv_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("zip_csv_path") / "csv-dataset.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, arcname=os.path.basename(csv_path)) f.write(csv2_path, arcname=os.path.basename(csv2_path)) return path @pytest.fixture(scope="session") def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.csv.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, 
arcname=os.path.basename(csv_path.replace(".csv", ".CSV"))) f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV"))) return path @pytest.fixture(scope="session") def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path))) f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path))) return path @pytest.fixture(scope="session") def parquet_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.parquet") schema = pa.schema( { "col_1": pa.string(), "col_2": pa.int64(), "col_3": pa.float64(), } ) with open(path, "wb") as f: writer = pq.ParquetWriter(f, schema=schema) pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema) writer.write_table(pa_table) writer.close() return path @pytest.fixture(scope="session") def geoparquet_path(tmp_path_factory): df = pd.read_parquet(path="https://github.com/opengeospatial/geoparquet/raw/v1.0.0/examples/example.parquet") path = str(tmp_path_factory.mktemp("data") / "dataset.geoparquet") df.to_parquet(path=path) return path @pytest.fixture(scope="session") def json_list_of_dicts_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.json") data = {"data": DATA} with open(path, "w") as f: json.dump(data, f) return path @pytest.fixture(scope="session") def json_dict_of_lists_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.json") data = {"data": DATA_DICT_OF_LISTS} with open(path, "w") as f: json.dump(data, f) return path @pytest.fixture(scope="session") def jsonl_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl") with open(path, "w") as f: for item in DATA: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl2_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl") with open(path, "w") as f: for item in DATA: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl_312_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl") with open(path, "w") as f: for item in DATA_312: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl_str_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl") with open(path, "w") as f: for item in DATA_STR: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def text_gz_path(tmp_path_factory, text_path): import gzip path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz") with open(text_path, "rb") as orig_file: with gzip.open(path, "wb") as zipped_file: zipped_file.writelines(orig_file) return path @pytest.fixture(scope="session") def jsonl_gz_path(tmp_path_factory, jsonl_path): import gzip path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz") with open(jsonl_path, "rb") as orig_file: with gzip.open(path, "wb") as zipped_file: zipped_file.writelines(orig_file) return path @pytest.fixture(scope="session") def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(jsonl_path, arcname=os.path.basename(jsonl_path)) f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path)) return path 
@pytest.fixture(scope="session") def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path))) return path @pytest.fixture(scope="session") def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path))) f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path))) return path @pytest.fixture(scope="session") def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar" with tarfile.TarFile(path, "w") as f: f.add(jsonl_path, arcname=os.path.basename(jsonl_path)) f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path)) return path @pytest.fixture(scope="session") def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar" with tarfile.TarFile(path, "w") as f: f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path))) return path @pytest.fixture(scope="session") def text_path(tmp_path_factory): data = ["0", "1", "2", "3"] path = str(tmp_path_factory.mktemp("data") / "dataset.txt") with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def text2_path(tmp_path_factory): data = ["0", "1", "2", "3"] path = str(tmp_path_factory.mktemp("data") / "dataset2.txt") with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def text_dir(tmp_path_factory): data = ["0", "1", "2", "3"] path = tmp_path_factory.mktemp("data_text_dir") / "dataset.txt" with open(path, "w") as f: for item in data: f.write(item + "\n") return path.parent @pytest.fixture(scope="session") def text_dir_with_unsupported_extension(tmp_path_factory): data = ["0", "1", "2", "3"] path = tmp_path_factory.mktemp("data") / "dataset.abc" with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def zip_text_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.text.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.basename(text_path)) f.write(text2_path, arcname=os.path.basename(text2_path)) return path @pytest.fixture(scope="session") def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path))) f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path))) return path @pytest.fixture(scope="session") def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.ext.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.basename("unsupported.ext")) f.write(text2_path, arcname=os.path.basename("unsupported_2.ext")) return path @pytest.fixture(scope="session") def text_path_with_unicode_new_lines(tmp_path_factory): text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"]) path = 
str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt") with open(path, "w", encoding="utf-8") as f: f.write(text) return path @pytest.fixture(scope="session") def image_file(): return os.path.join("tests", "features", "data", "test_image_rgb.jpg") @pytest.fixture(scope="session") def audio_file(): return os.path.join("tests", "features", "data", "test_audio_44100.wav") @pytest.fixture(scope="session") def zip_image_path(image_file, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.img.zip" with zipfile.ZipFile(path, "w") as f: f.write(image_file, arcname=os.path.basename(image_file)) f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg")) return path @pytest.fixture(scope="session") def data_dir_with_hidden_files(tmp_path_factory): data_dir = tmp_path_factory.mktemp("data_dir") (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) # hidden file with open(data_dir / "subdir" / ".test.txt", "w") as f: f.write("bar\n" * 10) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / ".subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) return data_dir
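# --- Illustrative note (a sketch, not part of the original fixtures) ---
# These session-scoped fixtures are consumed simply by naming them as test arguments.
# A hypothetical test using `csv_path` and `jsonl_path` could look like this:
#
#     def test_load_local_csv_and_jsonl(csv_path, jsonl_path):
#         csv_ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#         json_ds = datasets.load_dataset("json", data_files=jsonl_path, split="train")
#         # both files are generated from the same 4-row DATA payload defined above
#         assert csv_ds.num_rows == json_ds.num_rows == 4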
datasets/tests/fixtures/files.py/0
{ "file_path": "datasets/tests/fixtures/files.py", "repo_id": "datasets", "token_count": 8208 }
80
import importlib import shutil import textwrap import pytest from datasets import ClassLabel, DownloadManager, Features, Value from datasets.data_files import DataFilesDict, get_data_patterns from datasets.download.streaming_download_manager import StreamingDownloadManager from datasets.packaged_modules.folder_based_builder.folder_based_builder import ( FolderBasedBuilder, FolderBasedBuilderConfig, ) from datasets.tasks import TextClassification remote_files = [ "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hallo.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/hello.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour.txt", "https://huggingface.co/datasets/hf-internal-testing/textfolder/resolve/main/class1/bonjour2.txt", ] class DummyFolderBasedBuilder(FolderBasedBuilder): BASE_FEATURE = dict BASE_COLUMN_NAME = "base" BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig EXTENSIONS = [".txt"] CLASSIFICATION_TASK = TextClassification(text_column="base", label_column="label") @pytest.fixture def cache_dir(tmp_path): return str(tmp_path / "autofolder_cache_dir") @pytest.fixture def auto_text_file(text_file): return str(text_file) @pytest.fixture def data_files_with_labels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_labels_no_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_labels_no_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_labels_no_metadata @pytest.fixture def data_files_with_different_levels_no_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "data_files_with_different_levels" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "subdir" / "class1" subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_different_levels = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) return data_files_with_different_levels @pytest.fixture def data_files_with_one_label_no_metadata(tmp_path, auto_text_file): # only one label found = all files in a single dir/in a root dir data_dir = tmp_path / "data_files_with_one_label" data_dir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file0.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file1.txt" shutil.copyfile(auto_text_file, filename2) data_files_with_one_label = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) return data_files_with_one_label @pytest.fixture def files_with_labels_and_duplicated_label_key_in_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "files_with_labels_and_label_key_in_metadata" data_dir.mkdir(parents=True, exist_ok=True) subdir_class_0 = data_dir / "class0" subdir_class_0.mkdir(parents=True, exist_ok=True) subdir_class_1 = data_dir / "class1" 
subdir_class_1.mkdir(parents=True, exist_ok=True) filename = subdir_class_0 / "file_class0.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir_class_1 / "file_class1.txt" shutil.copyfile(auto_text_file, filename2) metadata_filename = tmp_path / data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "class0/file_class0.txt", "additional_feature": "First dummy file", "label": "CLASS_0"} {"file_name": "class1/file_class1.txt", "additional_feature": "Second dummy file", "label": "CLASS_1"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(filename2), str(metadata_filename) @pytest.fixture def file_with_metadata(tmp_path, text_file): filename = tmp_path / "file.txt" shutil.copyfile(text_file, filename) metadata_filename = tmp_path / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(metadata_filename) @pytest.fixture() def files_with_metadata_that_misses_one_sample(tmp_path, auto_text_file): filename = tmp_path / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = tmp_path / "file2.txt" shutil.copyfile(auto_text_file, filename2) metadata_filename = tmp_path / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) return str(filename), str(filename2), str(metadata_filename) @pytest.fixture def data_files_with_one_split_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_one_split" data_dir.mkdir(parents=True, exist_ok=True) subdir = data_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = data_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = data_dir / "file2.txt" shutil.copyfile(auto_text_file, filename2) filename3 = subdir / "file3.txt" # in subdir shutil.copyfile(auto_text_file, filename3) metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "file2.txt", "additional_feature": "Second dummy file"} {"file_name": "./subdir/file3.txt", "additional_feature": "Third dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_one_split_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_one_split_and_metadata) == 1 assert len(data_files_with_one_split_and_metadata["train"]) == 4 return data_files_with_one_split_and_metadata @pytest.fixture def data_files_with_two_splits_and_metadata(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_metadata_two_splits" data_dir.mkdir(parents=True, exist_ok=True) train_dir = data_dir / "train" train_dir.mkdir(parents=True, exist_ok=True) test_dir = data_dir / "test" test_dir.mkdir(parents=True, exist_ok=True) filename = train_dir / "file.txt" # train shutil.copyfile(auto_text_file, filename) filename2 = train_dir / "file2.txt" # train shutil.copyfile(auto_text_file, filename2) filename3 = test_dir / "file3.txt" # test shutil.copyfile(auto_text_file, filename3) train_metadata_filename = train_dir / "metadata.jsonl" train_metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Train dummy file"} 
{"file_name": "file2.txt", "additional_feature": "Second train dummy file"} """ ) with open(train_metadata_filename, "w", encoding="utf-8") as f: f.write(train_metadata) test_metadata_filename = test_dir / "metadata.jsonl" test_metadata = textwrap.dedent( """\ {"file_name": "file3.txt", "additional_feature": "Test dummy file"} """ ) with open(test_metadata_filename, "w", encoding="utf-8") as f: f.write(test_metadata) data_files_with_two_splits_and_metadata = DataFilesDict.from_patterns( get_data_patterns(str(data_dir)), data_dir.as_posix() ) assert len(data_files_with_two_splits_and_metadata) == 2 assert len(data_files_with_two_splits_and_metadata["train"]) == 3 assert len(data_files_with_two_splits_and_metadata["test"]) == 2 return data_files_with_two_splits_and_metadata @pytest.fixture def data_files_with_zip_archives(tmp_path, auto_text_file): data_dir = tmp_path / "autofolder_data_dir_with_zip_archives" data_dir.mkdir(parents=True, exist_ok=True) archive_dir = data_dir / "archive" archive_dir.mkdir(parents=True, exist_ok=True) subdir = archive_dir / "subdir" subdir.mkdir(parents=True, exist_ok=True) filename = archive_dir / "file.txt" shutil.copyfile(auto_text_file, filename) filename2 = subdir / "file2.txt" # in subdir shutil.copyfile(auto_text_file, filename2) metadata_filename = archive_dir / "metadata.jsonl" metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} {"file_name": "subdir/file2.txt", "additional_feature": "Second dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) shutil.make_archive(archive_dir, "zip", archive_dir) shutil.rmtree(str(archive_dir)) data_files_with_zip_archives = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) assert len(data_files_with_zip_archives) == 1 assert len(data_files_with_zip_archives["train"]) == 1 return data_files_with_zip_archives def test_inferring_labels_from_data_dirs(data_files_with_labels_no_metadata, cache_dir): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, drop_labels=False ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs assert autofolder.info.features == Features({"base": {}, "label": ClassLabel(names=["class0", "class1"])}) generator = autofolder._generate_examples(**gen_kwargs) assert all(example["label"] in {"class0", "class1"} for _, example in generator) def test_default_folder_builder_not_usable(data_files_with_labels_no_metadata, cache_dir): # builder would try to access non-existing attributes of a default `BuilderConfig` class # as a custom one is not provided with pytest.raises(AttributeError): _ = FolderBasedBuilder( data_files=data_files_with_labels_no_metadata, cache_dir=cache_dir, ) # test that AutoFolder is extended for streaming when it's child class is instantiated: # see line 115 in src/datasets/streaming.py def test_streaming_patched(): _ = DummyFolderBasedBuilder() module = importlib.import_module(FolderBasedBuilder.__module__) assert hasattr(module, "_patched_for_streaming") assert module._patched_for_streaming @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_duplicated_label_key( files_with_labels_and_duplicated_label_key_in_metadata, drop_metadata, drop_labels, cache_dir, caplog ): class0_file, class1_file, metadata_file = files_with_labels_and_duplicated_label_key_in_metadata autofolder = 
DummyFolderBasedBuilder( data_files=[class0_file, class1_file, metadata_file], cache_dir=cache_dir, drop_metadata=drop_metadata, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is False: # infer labels from directories even if metadata files are found warning_in_logs = any("ignoring metadata columns" in record.msg.lower() for record in caplog.records) assert warning_in_logs if drop_metadata is not True else not warning_in_logs assert autofolder.info.features["label"] == ClassLabel(names=["class0", "class1"]) assert all(example["label"] in ["class0", "class1"] for _, example in generator) else: if drop_metadata is not True: # labels are from metadata assert autofolder.info.features["label"] == Value("string") assert all(example["label"] in ["CLASS_0", "CLASS_1"] for _, example in generator) else: # drop both labels and metadata assert autofolder.info.features == Features({"base": {}}) assert all(example.keys() == {"base"} for _, example in generator) @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_labels( data_files_with_labels_no_metadata, auto_text_file, drop_metadata, drop_labels, cache_dir ): autofolder = DummyFolderBasedBuilder( data_files=data_files_with_labels_no_metadata, drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # removing labels explicitly requires drop_labels=True assert gen_kwargs["add_labels"] is not bool(drop_labels) assert gen_kwargs["add_metadata"] is False generator = autofolder._generate_examples(**gen_kwargs) if not drop_labels: assert all( example.keys() == {"base", "label"} and all(val is not None for val in example.values()) for _, example in generator ) else: assert all( example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator ) @pytest.mark.parametrize("drop_metadata", [None, True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_generate_examples_drop_metadata(file_with_metadata, drop_metadata, drop_labels, cache_dir): file, metadata_file = file_with_metadata autofolder = DummyFolderBasedBuilder( data_files=[file, metadata_file], drop_metadata=drop_metadata, drop_labels=drop_labels, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs # since the dataset has metadata, removing the metadata explicitly requires drop_metadata=True assert gen_kwargs["add_metadata"] is not bool(drop_metadata) # since the dataset has metadata, adding the labels explicitly requires drop_labels=False assert gen_kwargs["add_labels"] is (drop_labels is False) generator = autofolder._generate_examples(**gen_kwargs) expected_columns = {"base"} if gen_kwargs["add_metadata"]: expected_columns.add("additional_feature") if gen_kwargs["add_labels"]: expected_columns.add("label") result = [example for _, example in generator] assert len(result) == 1 example = result[0] assert example.keys() == expected_columns for column in expected_columns: assert example[column] is not None @pytest.mark.parametrize("remote", [True, False]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_different_levels_no_metadata( data_files_with_different_levels_no_metadata, drop_labels, remote, cache_dir 
): data_files = remote_files if remote else data_files_with_different_levels_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if files are on different levels in dir structure assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("remote", [False, True]) @pytest.mark.parametrize("drop_labels", [None, True, False]) def test_data_files_with_one_label_no_metadata(data_files_with_one_label_no_metadata, drop_labels, remote, cache_dir): data_files = remote_files[:2] if remote else data_files_with_one_label_no_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, drop_labels=drop_labels, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if drop_labels is not False: # with None (default) we should drop labels if only one label is found (=if there is a single dir) assert "label" not in autofolder.info.features assert all(example.keys() == {"base"} for _, example in generator) else: assert "label" in autofolder.info.features assert isinstance(autofolder.info.features["label"], ClassLabel) assert all(example.keys() == {"base", "label"} for _, example in generator) @pytest.mark.parametrize("drop_metadata", [None, True, False]) def test_data_files_with_metadata_that_misses_one_sample( files_with_metadata_that_misses_one_sample, drop_metadata, cache_dir ): file, file2, metadata_file = files_with_metadata_that_misses_one_sample if not drop_metadata: features = Features({"base": None, "additional_feature": Value("string")}) else: features = Features({"base": None}) autofolder = DummyFolderBasedBuilder( data_files=[file, file2, metadata_file], drop_metadata=drop_metadata, features=features, cache_dir=cache_dir, ) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) if not drop_metadata: with pytest.raises(ValueError): list(generator) else: assert all( example.keys() == {"base"} and all(val is not None for val in example.values()) for _, example in generator ) @pytest.mark.parametrize("streaming", [False, True]) @pytest.mark.parametrize("n_splits", [1, 2]) def test_data_files_with_metadata_and_splits( streaming, cache_dir, n_splits, data_files_with_one_split_and_metadata, data_files_with_two_splits_and_metadata ): data_files = data_files_with_one_split_and_metadata if n_splits == 1 else data_files_with_two_splits_and_metadata autofolder = DummyFolderBasedBuilder( data_files=data_files, cache_dir=cache_dir, ) download_manager = StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files.items(), generated_splits): assert split == generated_split.name expected_num_of_examples = len(files) - 1 generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == 
expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) @pytest.mark.parametrize("streaming", [False, True]) def test_data_files_with_metadata_and_archives(streaming, cache_dir, data_files_with_zip_archives): autofolder = DummyFolderBasedBuilder(data_files=data_files_with_zip_archives, cache_dir=cache_dir) download_manager = StreamingDownloadManager() if streaming else DownloadManager() generated_splits = autofolder._split_generators(download_manager) for (split, files), generated_split in zip(data_files_with_zip_archives.items(), generated_splits): assert split == generated_split.name num_of_archives = len(files) expected_num_of_examples = 2 * num_of_archives generated_examples = list(autofolder._generate_examples(**generated_split.gen_kwargs)) assert len(generated_examples) == expected_num_of_examples assert len({example["base"] for _, example in generated_examples}) == expected_num_of_examples assert len({example["additional_feature"] for _, example in generated_examples}) == expected_num_of_examples assert all(example["additional_feature"] is not None for _, example in generated_examples) def test_data_files_with_wrong_metadata_file_name(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir / "file.txt") metadata_filename = data_dir / "bad_metadata.jsonl" # bad file metadata = textwrap.dedent( """\ {"file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) gen_kwargs = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs generator = autofolder._generate_examples(**gen_kwargs) assert all("additional_feature" not in example for _, example in generator) def test_data_files_with_wrong_file_name_column_in_metadata_file(cache_dir, tmp_path, auto_text_file): data_dir = tmp_path / "data_dir_with_bad_metadata" data_dir.mkdir(parents=True, exist_ok=True) shutil.copyfile(auto_text_file, data_dir / "file.txt") metadata_filename = data_dir / "metadata.jsonl" metadata = textwrap.dedent( # with bad column "bad_file_name" instead of "file_name" """\ {"bad_file_name": "file.txt", "additional_feature": "Dummy file"} """ ) with open(metadata_filename, "w", encoding="utf-8") as f: f.write(metadata) data_files_with_bad_metadata = DataFilesDict.from_patterns(get_data_patterns(str(data_dir)), data_dir.as_posix()) autofolder = DummyFolderBasedBuilder(data_files=data_files_with_bad_metadata, cache_dir=cache_dir) with pytest.raises(ValueError) as exc_info: _ = autofolder._split_generators(StreamingDownloadManager())[0].gen_kwargs assert "`file_name` must be present" in str(exc_info.value)
datasets/tests/packaged_modules/test_folder_based_builder.py/0
{ "file_path": "datasets/tests/packaged_modules/test_folder_based_builder.py", "repo_id": "datasets", "token_count": 8915 }
81
import unittest import warnings from datasets.utils import experimental @experimental def dummy_function(): return "success" class TestExperimentalFlag(unittest.TestCase): def test_experimental_warning(self): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") self.assertEqual(dummy_function(), "success") self.assertEqual(len(w), 1)
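# --- Illustrative sketch (an assumption for context, not the actual implementation of
# `datasets.utils.experimental`): a decorator with the behaviour exercised above would wrap the
# function, emit one warning per call, and still return the wrapped function's result. ---
import functools


def _experimental_sketch(fn):
    """Toy equivalent of an `experimental` decorator, for illustration only."""

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        warnings.warn(f"{fn.__name__} is experimental and may change in future releases.", UserWarning)
        return fn(*args, **kwargs)

    return wrapper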
datasets/tests/test_experimental.py/0
{ "file_path": "datasets/tests/test_experimental.py", "repo_id": "datasets", "token_count": 152 }
82
# Copyright 2020 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration pytestmark = pytest.mark.integration REQUIRE_FAIRSEQ = {"comet"} _has_fairseq = importlib.util.find_spec("fairseq") is not None UNSUPPORTED_ON_WINDOWS = {"code_eval"} _on_windows = os.name == "nt" REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"} _has_transformers = importlib.util.find_spec("transformers") is not None def skip_if_metric_requires_fairseq(test_case): @wraps(test_case) def wrapper(self, metric_name): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest('"test requires Fairseq"') else: test_case(self, metric_name) return wrapper def skip_if_metric_requires_transformers(test_case): @wraps(test_case) def wrapper(self, metric_name): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest('"test requires transformers"') else: test_case(self, metric_name) return wrapper def skip_on_windows_if_not_windows_compatible(test_case): @wraps(test_case) def wrapper(self, metric_name): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest('"test not supported on Windows"') else: test_case(self, metric_name) return wrapper def get_local_metric_names(): metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names()) @for_all_test_methods( skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible ) @local class LocalMetricTest(parameterized.TestCase): INTENSIVE_CALLS_PATCHER = {} metric_name = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning") @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning") def test_load_metric(self, metric_name): doctest.ELLIPSIS_MARKER = "[...]" metric_module = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path ) metric = datasets.load.import_main_class(metric_module.__name__, dataset=False) # check parameters parameters = inspect.signature(metric._compute).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values())) # no **kwargs # run doctest with self.patch_intensive_calls(metric_name, metric_module.__name__): with self.use_local_metrics(): try: results = doctest.testmod(metric_module, verbose=True, raise_on_error=True) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed, 0) 
self.assertGreater(results.attempted, 1) @slow def test_load_real_metric(self, metric_name): doctest.ELLIPSIS_MARKER = "[...]" metric_module = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path ) # run doctest with self.use_local_metrics(): results = doctest.testmod(metric_module, verbose=True, raise_on_error=True) self.assertEqual(results.failed, 0) self.assertGreater(results.attempted, 1) @contextmanager def patch_intensive_calls(self, metric_name, module_name): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name): yield else: yield @contextmanager def use_local_metrics(self): def load_local_metric(metric_name, *args, **kwargs): return load_metric(os.path.join("metrics", metric_name), *args, **kwargs) with patch("datasets.load_metric") as mock_load_metric: mock_load_metric.side_effect = load_local_metric yield @classmethod def register_intensive_calls_patcher(cls, metric_name): def wrapper(patcher): patcher = contextmanager(patcher) cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher return patcher return wrapper # Metrics intensive calls patchers # -------------------------------- @LocalMetricTest.register_intensive_calls_patcher("bleurt") def patch_bleurt(module_name): import tensorflow.compat.v1 as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv", "", "") # handle pytest cli flags class MockedPredictor(Predictor): def predict(self, input_dict): assert len(input_dict["input_ids"]) == 2 return np.array([1.03, 1.04]) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor") as mock_create_predictor: mock_create_predictor.return_value = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore") def patch_bertscore(module_name): import torch def bert_cos_score_idf(model, refs, *args, **kwargs): return torch.tensor([[1.0, 1.0, 1.0]] * len(refs)) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model"), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: mock_bert_cos_score_idf.side_effect = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet") def patch_comet(module_name): def load_from_checkpoint(model_path): class Model: def predict(self, data, *args, **kwargs): assert len(data) == 2 scores = [0.19, 0.92] return scores, sum(scores) / len(scores) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model") as mock_download_model: mock_download_model.return_value = None with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint: mock_load_from_checkpoint.side_effect = load_from_checkpoint yield def test_seqeval_raises_when_incorrect_scheme(): metric = load_metric(os.path.join("metrics", "seqeval")) wrong_scheme = "ERROR" error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}" with pytest.raises(ValueError, match=re.escape(error_message)): metric.compute(predictions=[], references=[], scheme=wrong_scheme)
datasets/tests/test_metric_common.py/0
{ "file_path": "datasets/tests/test_metric_common.py", "repo_id": "datasets", "token_count": 3144 }
83
import asyncio import importlib.metadata import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config def parse_flag_from_env(key, default=False): try: value = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _value = default else: # KEY is set, convert it to True or False. try: _value = strtobool(value) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"If set, {key} must be yes or no.") return _value _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) _run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False) _run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True) _run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio require_sndfile = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib.metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam require_beam = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility require_dill_gt_0_3_2 = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows require_not_windows = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def require_faiss(test_case): """ Decorator marking a test that requires Faiss. These tests are skipped when Faiss isn't installed. """ try: import faiss # noqa except ImportError: test_case = unittest.skip("test requires faiss")(test_case) return test_case def require_regex(test_case): """ Decorator marking a test that requires regex. These tests are skipped when Regex isn't installed. """ try: import regex # noqa except ImportError: test_case = unittest.skip("test requires regex")(test_case) return test_case def require_elasticsearch(test_case): """ Decorator marking a test that requires ElasticSearch. These tests are skipped when ElasticSearch isn't installed. """ try: import elasticsearch # noqa except ImportError: test_case = unittest.skip("test requires elasticsearch")(test_case) return test_case def require_sqlalchemy(test_case): """ Decorator marking a test that requires SQLAlchemy. These tests are skipped when SQLAlchemy isn't installed. """ try: import sqlalchemy # noqa except ImportError: test_case = unittest.skip("test requires sqlalchemy")(test_case) return test_case def require_torch(test_case): """ Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. 
""" if not config.TORCH_AVAILABLE: test_case = unittest.skip("test requires PyTorch")(test_case) return test_case def require_tf(test_case): """ Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed. """ if not config.TF_AVAILABLE: test_case = unittest.skip("test requires TensorFlow")(test_case) return test_case def require_jax(test_case): """ Decorator marking a test that requires JAX. These tests are skipped when JAX isn't installed. """ if not config.JAX_AVAILABLE: test_case = unittest.skip("test requires JAX")(test_case) return test_case def require_pil(test_case): """ Decorator marking a test that requires Pillow. These tests are skipped when Pillow isn't installed. """ if not config.PIL_AVAILABLE: test_case = unittest.skip("test requires Pillow")(test_case) return test_case def require_transformers(test_case): """ Decorator marking a test that requires transformers. These tests are skipped when transformers isn't installed. """ try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers")(test_case) else: return test_case def require_tiktoken(test_case): """ Decorator marking a test that requires tiktoken. These tests are skipped when transformers isn't installed. """ try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(test_case) else: return test_case def require_spacy(test_case): """ Decorator marking a test that requires spacy. These tests are skipped when they aren't installed. """ try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(test_case) else: return test_case def require_spacy_model(model): """ Decorator marking a test that requires a spacy model. These tests are skipped when they aren't installed. """ def _require_spacy_model(test_case): try: import spacy # noqa F401 spacy.load(model) except ImportError: return unittest.skip("test requires spacy")(test_case) except OSError: return unittest.skip("test requires spacy model '{}'".format(model))(test_case) else: return test_case return _require_spacy_model def require_pyspark(test_case): """ Decorator marking a test that requires pyspark. These tests are skipped when pyspark isn't installed. """ try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(test_case) else: return test_case def require_joblibspark(test_case): """ Decorator marking a test that requires joblibspark. These tests are skipped when pyspark isn't installed. """ try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(test_case) else: return test_case def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ if not _run_slow_tests or _run_slow_tests == 0: test_case = unittest.skip("test is slow")(test_case) return test_case def local(test_case): """ Decorator marking a test as local Local tests are run by default. Set the RUN_LOCAL environment variable to a falsy value to not run them. """ if not _run_local_tests or _run_local_tests == 0: test_case = unittest.skip("test is local")(test_case) return test_case def packaged(test_case): """ Decorator marking a test as packaged Packaged tests are run by default. Set the RUN_PACKAGED environment variable to a falsy value to not run them. 
""" if not _run_packaged_tests or _run_packaged_tests == 0: test_case = unittest.skip("test is packaged")(test_case) return test_case def remote(test_case): """ Decorator marking a test as one that relies on GitHub or the Hugging Face Hub. Remote tests are skipped by default. Set the RUN_REMOTE environment variable to a falsy value to not run them. """ if not _run_remote_tests or _run_remote_tests == 0: test_case = unittest.skip("test requires remote")(test_case) return test_case def for_all_test_methods(*decorators): def decorate(cls): for name, fn in cls.__dict__.items(): if callable(fn) and name.startswith("test"): for decorator in decorators: fn = decorator(fn) setattr(cls, name, fn) return cls return decorate class RequestWouldHangIndefinitelyError(Exception): pass class OfflineSimulationMode(Enum): CONNECTION_FAILS = 0 CONNECTION_TIMES_OUT = 1 HF_DATASETS_OFFLINE_SET_TO_1 = 2 @contextmanager def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16): """ Simulate offline mode. There are three offline simulatiom modes: CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call. Connection errors are created by mocking socket.socket CONNECTION_TIMES_OUT: the connection hangs until it times out. The default timeout value is low (1e-16) to speed up the tests. Timeout errors are created by mocking requests.request HF_DATASETS_OFFLINE_SET_TO_1: the HF_DATASETS_OFFLINE environment variable is set to 1. This makes the http/ftp calls of the library instantly fail and raise an OfflineModeEmabled error. """ online_request = requests.Session().request def timeout_request(session, method, url, **kwargs): # Change the url to an invalid url so that the connection hangs invalid_url = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) kwargs["timeout"] = timeout try: return online_request(method, invalid_url, **kwargs) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier e.request.url = url max_retry_error = e.args[0] max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),) e.args = (max_retry_error,) raise def raise_connection_error(session, prepared_request, **kwargs): raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send", raise_connection_error): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request", timeout_request): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE", True): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def set_current_working_directory_to_temp_dir(*args, **kwargs): original_working_dir = str(Path().resolve()) with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir: try: os.chdir(tmp_dir) yield finally: os.chdir(original_working_dir) @contextmanager def assert_arrow_memory_increases(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." 
@contextmanager def assert_arrow_memory_doesnt_increase(): import gc gc.collect() previous_allocated_memory = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def is_rng_equal(rng1, rng2): return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist() def xfail_if_500_502_http_error(func): import decorator from requests.exceptions import HTTPError def _wrapper(func, *args, **kwargs): try: return func(*args, **kwargs) except HTTPError as err: if str(err).startswith("500") or str(err).startswith("502"): pytest.xfail(str(err)) raise err return decorator.decorator(_wrapper, func) # --- distributed testing functions --- # # copied from transformers # originally adapted from https://stackoverflow.com/a/59041913/9201239 class _RunOutput: def __init__(self, returncode, stdout, stderr): self.returncode = returncode self.stdout = stdout self.stderr = stderr async def _read_stream(stream, callback): while True: line = await stream.readline() if line: callback(line) else: break async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput: if echo: print("\nRunning: ", " ".join(cmd)) p = await asyncio.create_subprocess_exec( cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) out = [] err = [] def tee(line, sink, pipe, label=""): line = line.decode("utf-8").rstrip() sink.append(line) if not quiet: print(label, line, file=pipe) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")), _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")), ], timeout=timeout, ) return _RunOutput(await p.wait(), out, err) def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: loop = asyncio.get_event_loop() result = loop.run_until_complete( _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) ) cmd_str = " ".join(cmd) if result.returncode > 0: stderr = "\n".join(result.stderr) raise RuntimeError( f"'{cmd_str}' failed with returncode {result.returncode}\n\n" f"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"'{cmd_str}' produced no output.") return result def pytest_xdist_worker_id(): """ Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0 if `-n 1` or `pytest-xdist` isn't being used. """ worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0") worker = re.sub(r"^gw", "", worker, 0, re.M) return int(worker) def get_torch_dist_unique_port(): """ Returns a port number that can be fed to `torchrun`'s `--master_port` argument. 
Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the same port at once. """ port = 29500 uniq_delta = pytest_xdist_worker_id() return port + uniq_delta
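# --------------------------------------------------------------------------- #
# Illustrative usage sketch (added for clarity; not part of the upstream file).
# It shows how the `offline` context manager defined above combines with
# pytest: anything that goes through `requests` fails fast instead of hitting
# the network. The URL below is only an example.
# --------------------------------------------------------------------------- #
def _example_offline_usage():
    import pytest

    # Mode 1: every outgoing request immediately raises a ConnectionError.
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co")

    # Mode 2: requests "hang" and then time out almost instantly. ConnectTimeout
    # is a subclass of requests.ConnectionError, so the same assertion works.
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=1e-16):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co", timeout=10.0)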
datasets/tests/utils.py/0
{ "file_path": "datasets/tests/utils.py", "repo_id": "datasets", "token_count": 6318 }
84
<jupyter_start><jupyter_text>Unit 6: Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖In this notebook, you'll learn to use A2C with [Panda-Gym](https://github.com/qgallouedec/panda-gym). You're going **to train a robotic arm** (Franka Emika Panda robot) to perform a task:- `Reach`: the robot must place its end-effector at a target position.After that, you'll be able **to train in other robotics tasks**. 🎮 Environments:- [Panda-Gym](https://github.com/qgallouedec/panda-gym)📚 RL-Library:- [Stable-Baselines3](https://stable-baselines3.readthedocs.io/) We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). Objectives of this notebook 🏆At the end of the notebook, you will:- Be able to use **Panda-Gym**, the environment library.- Be able to **train robots using A2C**.- Understand why **we need to normalize the input**.- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. This notebook is from the Deep Reinforcement Learning CourseIn this free course, you will:- 📖 Study Deep Reinforcement Learning in **theory and practice**.- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- 🤖 Train **agents in unique environments**And more check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-courseDon’t forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).**The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 Study [Actor-Critic methods by reading Unit 6](https://huggingface.co/deep-rl-course/unit6/introduction) 🤗 Let's train our first robots 🤖 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process), you need to push your trained model to the Hub and get the following results:- `PandaReachDense-v3` get a result of >= -3.5.To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward**For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process Set the GPU 💪- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Create a virtual display 🔽During the notebook, we'll need to generate a replay video. 
To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames).Hence the following cell will install the librairies and create and run a virtual screen 🖥<jupyter_code>%%capture !apt install python-opengl !apt install ffmpeg !apt install xvfb !pip3 install pyvirtualdisplay # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start()<jupyter_output><empty_output><jupyter_text>Install dependencies 🔽The first step is to install the dependencies, we’ll install multiple ones:- `gymnasium`- `panda-gym`: Contains the robotics arm environments.- `stable-baselines3`: The SB3 deep reinforcement learning library.- `huggingface_sb3`: Additional code for Stable-baselines3 to load and upload models from the Hugging Face 🤗 Hub.- `huggingface_hub`: Library allowing anyone to work with the Hub repositories.⏲ The installation can **take 10 minutes**.<jupyter_code>!pip install stable-baselines3[extra] !pip install gymnasium !pip install huggingface_sb3 !pip install huggingface_hub !pip install panda_gym<jupyter_output><empty_output><jupyter_text>Import the packages 📦<jupyter_code>import os import gymnasium as gym import panda_gym from huggingface_sb3 import load_from_hub, package_to_hub from stable_baselines3 import A2C from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize from stable_baselines3.common.env_util import make_vec_env from huggingface_hub import notebook_login<jupyter_output><empty_output><jupyter_text>PandaReachDense-v3 🦾The agent we're going to train is a robotic arm that needs to do controls (moving the arm and using the end-effector).In robotics, the *end-effector* is the device at the end of a robotic arm designed to interact with the environment.In `PandaReach`, the robot must place its end-effector at a target position (green ball).We're going to use the dense version of this environment. It means we'll get a *dense reward function* that **will provide a reward at each timestep** (the closer the agent is to completing the task, the higher the reward). Contrary to a *sparse reward function* where the environment **return a reward if and only if the task is completed**.Also, we're going to use the *End-effector displacement control*, it means the **action corresponds to the displacement of the end-effector**. We don't control the individual motion of each joint (joint control).This way **the training will be easier**. 
Create the environment The environment 🎮In `PandaReachDense-v3` the robotic arm must place its end-effector at a target position (green ball).<jupyter_code>env_id = "PandaReachDense-v3" # Create the env env = gym.make(env_id) # Get the state space and action space s_size = env.observation_space.shape a_size = env.action_space print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation<jupyter_output><empty_output><jupyter_text>The observation space **is a dictionary with 3 different elements**:- `achieved_goal`: (x,y,z) position of the goal.- `desired_goal`: (x,y,z) distance between the goal position and the current object position.- `observation`: position (x,y,z) and velocity of the end-effector (vx, vy, vz).Given it's a dictionary as observation, **we will need to use a MultiInputPolicy policy instead of MlpPolicy**.<jupyter_code>print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>The action space is a vector with 3 values:- Control x, y, z movement Normalize observation and rewards A good practice in reinforcement learning is to [normalize input features](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html).For that purpose, there is a wrapper that will compute a running average and standard deviation of input features.We also normalize rewards with this same wrapper by adding `norm_reward = True`[You should check the documentation to fill this cell](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.htmlvecnormalize)<jupyter_code>env = make_vec_env(env_id, n_envs=4) # Adding this wrapper to normalize the observation and the reward env = # TODO: Add the wrapper<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>env = make_vec_env(env_id, n_envs=4) env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)<jupyter_output><empty_output><jupyter_text>Create the A2C Model 🤖For more information about A2C implementation with StableBaselines3 check: https://stable-baselines3.readthedocs.io/en/master/modules/a2c.htmlnotesTo find the best parameters I checked the [official trained agents by Stable-Baselines3 team](https://huggingface.co/sb3).<jupyter_code>model = # Create the A2C model and try to find the best parameters<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>model = A2C(policy = "MultiInputPolicy", env = env, verbose=1)<jupyter_output><empty_output><jupyter_text>Train the A2C agent 🏃- Let's train our agent for 1,000,000 timesteps, don't forget to use GPU on Colab. 
It will take approximately ~25-40min<jupyter_code>model.learn(1_000_000) # Save the model and VecNormalize statistics when saving the agent model.save("a2c-PandaReachDense-v3") env.save("vec_normalize.pkl")<jupyter_output><empty_output><jupyter_text>Evaluate the agent 📈- Now that's our agent is trained, we need to **check its performance**.- Stable-Baselines3 provides a method to do that: `evaluate_policy`<jupyter_code>from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize # Load the saved statistics eval_env = DummyVecEnv([lambda: gym.make("PandaReachDense-v3")]) eval_env = VecNormalize.load("vec_normalize.pkl", eval_env) # We need to override the render_mode eval_env.render_mode = "rgb_array" # do not update them at test time eval_env.training = False # reward normalization is not needed at test time eval_env.norm_reward = False # Load the agent model = A2C.load("a2c-PandaReachDense-v3") mean_reward, std_reward = evaluate_policy(model, eval_env) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")<jupyter_output><empty_output><jupyter_text>Publish your trained model on the Hub 🔥Now that we saw we got good results after the training, we can publish our trained model on the Hub with one line of code.📚 The libraries documentation 👉 https://github.com/huggingface/huggingface_sb3/tree/mainhugging-face--x-stable-baselines3-v20 By using `package_to_hub`, as we already mentionned in the former units, **you evaluate, record a replay, generate a model card of your agent and push it to the hub**.This way:- You can **showcase our work** 🔥- You can **visualize your agent playing** 👀- You can **share with the community an agent that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**- Copy the token- Run the cell below and paste the token<jupyter_code>notebook_login() !git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `package_to_hub()` function For this environment, **running this cell can take approximately 10min**<jupyter_code>from huggingface_sb3 import package_to_hub package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"ThomasSimonini/a2c-{env_id}", # Change the username commit_message="Initial commit", )<jupyter_output><empty_output><jupyter_text>Some additional challenges 🏆The best way to learn **is to try things by your own**! Why not trying `PandaPickAndPlace-v3`?If you want to try more advanced tasks for panda-gym, you need to check what was done using **TQC or SAC** (a more sample-efficient algorithm suited for robotics tasks). 
In real robotics, you'll use a more sample-efficient algorithm for a simple reason: contrary to a simulation **if you move your robotic arm too much, you have a risk of breaking it**.PandaPickAndPlace-v1 (this model uses the v1 version of the environment): https://huggingface.co/sb3/tqc-PandaPickAndPlace-v1And don't hesitate to check panda-gym documentation here: https://panda-gym.readthedocs.io/en/latest/usage/train_with_sb3.htmlWe provide you the steps to train another agent (optional):1. Define the environment called "PandaPickAndPlace-v3"2. Make a vectorized environment3. Add a wrapper to normalize the observations and rewards. [Check the documentation](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.htmlvecnormalize)4. Create the A2C Model (don't forget verbose=1 to print the training logs).5. Train it for 1M Timesteps6. Save the model and VecNormalize statistics when saving the agent7. Evaluate your agent8. Publish your trained model on the Hub 🔥 with `package_to_hub` Solution (optional)<jupyter_code># 1 - 2 env_id = "PandaPickAndPlace-v3" env = make_vec_env(env_id, n_envs=4) # 3 env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.) # 4 model = A2C(policy = "MultiInputPolicy", env = env, verbose=1) # 5 model.learn(1_000_000) # 6 model_name = "a2c-PandaPickAndPlace-v3"; model.save(model_name) env.save("vec_normalize.pkl") # 7 from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize # Load the saved statistics eval_env = DummyVecEnv([lambda: gym.make("PandaPickAndPlace-v3")]) eval_env = VecNormalize.load("vec_normalize.pkl", eval_env) # do not update them at test time eval_env.training = False # reward normalization is not needed at test time eval_env.norm_reward = False # Load the agent model = A2C.load(model_name) mean_reward, std_reward = evaluate_policy(model, eval_env) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}") # 8 package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"ThomasSimonini/a2c-{env_id}", # TODO: Change the username commit_message="Initial commit", )<jupyter_output><empty_output>
deep-rl-class/notebooks/unit6/unit6.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit6/unit6.ipynb", "repo_id": "deep-rl-class", "token_count": 4384 }
85
# Introduction to Deep Reinforcement Learning [[introduction-to-deep-reinforcement-learning]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Unit 1 thumbnail" width="100%"> Welcome to the most fascinating topic in Artificial Intelligence: **Deep Reinforcement Learning.** Deep RL is a type of Machine Learning where an agent learns **how to behave** in an environment **by performing actions** and **seeing the results.** In this first unit, **you'll learn the foundations of Deep Reinforcement Learning.** Then, you'll **train your Deep Reinforcement Learning agent, a lunar lander to land correctly on the Moon** using <a href="https://stable-baselines3.readthedocs.io/en/master/"> Stable-Baselines3 </a>, a Deep Reinforcement Learning library. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/lunarLander.gif" alt="LunarLander"> And finally, you'll **upload this trained agent to the Hugging Face Hub 🤗, a free, open platform where people can share ML models, datasets, and demos.** It's essential **to master these elements** before diving into implementing Deep Reinforcement Learning agents. The goal of this chapter is to give you solid foundations. After this unit, in a bonus unit, you'll be **able to train Huggy the Dog 🐶 to fetch the stick and play with him 🤗**. <video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/huggy.mp4" type="video/mp4" controls autoplay loop mute /> So let's get started! 🚀
deep-rl-class/units/en/unit1/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 477 }
86
# A Q-Learning example [[q-learning-example]] To better understand Q-Learning, let's take a simple example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-Example-2.jpg" alt="Maze-Example"/> - You're a mouse in this tiny maze. You always **start at the same starting point.** - The goal is **to eat the big pile of cheese at the bottom right-hand corner** and avoid the poison. After all, who doesn't like cheese? - The episode ends if we eat the poison, **eat the big pile of cheese**, or if we take more than five steps. - The learning rate is 0.1 - The discount rate (gamma) is 0.99 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-1.jpg" alt="Maze-Example"/> The reward function goes like this: - **+0:** Going to a state with no cheese in it. - **+1:** Going to a state with a small cheese in it. - **+10:** Going to the state with the big pile of cheese. - **-10:** Going to the state with the poison and thus dying. - **+0** If we take more than five steps. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-2.jpg" alt="Maze-Example"/> To train our agent to have an optimal policy (so a policy that goes right, right, down), **we will use the Q-Learning algorithm**. ## Step 1: Initialize the Q-table [[step1]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-1.jpg" alt="Maze-Example"/> So, for now, **our Q-table is useless**; we need **to train our Q-function using the Q-Learning algorithm.** Let's do it for 2 training timesteps: Training timestep 1: ## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2]] Because epsilon is big (= 1.0), I take a random action. In this case, I go right. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-3.jpg" alt="Maze-Example"/> ## Step 3: Perform action At, get Rt+1 and St+1 [[step3]] By going right, I get a small cheese, so \\(R_{t+1} = 1\\) and I'm in a new state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-4.jpg" alt="Maze-Example"/> ## Step 4: Update Q(St, At) [[step4]] We can now update \\(Q(S_t, A_t)\\) using our formula. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-5.jpg" alt="Maze-Example"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-4.jpg" alt="Maze-Example"/> Training timestep 2: ## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2-2]] **I take a random action again, since epsilon=0.99 is big**. (Notice we decay epsilon a little bit because, as the training progress, we want less and less exploration). I took the action 'down'. 
**This is not a good action since it leads me to the poison.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-6.jpg" alt="Maze-Example"/>

## Step 3: Perform action At, get Rt+1 and St+1 [[step3-3]]

Because I ate poison, **I get \\(R_{t+1} = -10\\), and I die.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-7.jpg" alt="Maze-Example"/>

## Step 4: Update Q(St, At) [[step4-4]]

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-8.jpg" alt="Maze-Example"/>

Because we're dead, we start a new episode. But what we see here is that, **with two exploration steps, my agent already became smarter.**

As we continue exploring and exploiting the environment and updating Q-values using the TD target, the **Q-table will give us a better and better approximation. At the end of the training, we'll get an estimate of the optimal Q-function.**
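To make these two updates concrete, here is a small illustrative sketch (the state and action names are made up for this maze; only the update rule, the learning rate, the discount and the rewards mirror the example above):

```python
# Tabular Q-Learning update, applied to the two training timesteps above.
lr, gamma = 0.1, 0.99
actions = ["left", "right", "up", "down"]
Q = {}  # Q-table: (state, action) -> value, initialised to 0


def q_update(state, action, reward, next_state):
    best_next = max(Q.get((next_state, a), 0.0) for a in actions)
    td_target = reward + gamma * best_next
    td_error = td_target - Q.get((state, action), 0.0)
    Q[(state, action)] = Q.get((state, action), 0.0) + lr * td_error


# Timestep 1: from the start, the random action "right" eats a small cheese (+1).
q_update("start", "right", reward=1, next_state="small_cheese")
# Timestep 2: from that state, the random action "down" hits the poison (-10) and ends the episode.
q_update("small_cheese", "down", reward=-10, next_state="dead")

print(Q)  # {('start', 'right'): 0.1, ('small_cheese', 'down'): -1.0}
```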
deep-rl-class/units/en/unit2/q-learning-example.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/q-learning-example.mdx", "repo_id": "deep-rl-class", "token_count": 1402 }
87
# The advantages and disadvantages of policy-gradient methods At this point, you might ask, "but Deep Q-Learning is excellent! Why use policy-gradient methods?". To answer this question, let's study the **advantages and disadvantages of policy-gradient methods**. ## Advantages There are multiple advantages over value-based methods. Let's see some of them: ### The simplicity of integration We can estimate the policy directly without storing additional data (action values). ### Policy-gradient methods can learn a stochastic policy Policy-gradient methods can **learn a stochastic policy while value functions can't**. This has two consequences: 1. We **don't need to implement an exploration/exploitation trade-off by hand**. Since we output a probability distribution over actions, the agent explores **the state space without always taking the same trajectory.** 2. We also get rid of the problem of **perceptual aliasing**. Perceptual aliasing is when two states seem (or are) the same but need different actions. Let's take an example: we have an intelligent vacuum cleaner whose goal is to suck the dust and avoid killing the hamsters. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster1.jpg" alt="Hamster 1"/> </figure> Our vacuum cleaner can only perceive where the walls are. The problem is that the **two red (colored) states are aliased states because the agent perceives an upper and lower wall for each**. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster2.jpg" alt="Hamster 1"/> </figure> Under a deterministic policy, the policy will either always move right when in a red state or always move left. **Either case will cause our agent to get stuck and never suck the dust**. Under a value-based Reinforcement learning algorithm, we learn a **quasi-deterministic policy** ("greedy epsilon strategy"). Consequently, our agent can **spend a lot of time before finding the dust**. On the other hand, an optimal stochastic policy **will randomly move left or right in red (colored) states**. Consequently, **it will not be stuck and will reach the goal state with a high probability**. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster3.jpg" alt="Hamster 1"/> </figure> ### Policy-gradient methods are more effective in high-dimensional action spaces and continuous actions spaces The problem with Deep Q-learning is that their **predictions assign a score (maximum expected future reward) for each possible action**, at each time step, given the current state. But what if we have an infinite possibility of actions? For instance, with a self-driving car, at each state, you can have a (near) infinite choice of actions (turning the wheel at 15°, 17.2°, 19,4°, honking, etc.). **We'll need to output a Q-value for each possible action**! And **taking the max action of a continuous output is an optimization problem itself**! Instead, with policy-gradient methods, we output a **probability distribution over actions.** ### Policy-gradient methods have better convergence properties In value-based methods, we use an aggressive operator to **change the value function: we take the maximum over Q-estimates**. 
Consequently, the action probabilities may change dramatically for an arbitrarily small change in the estimated action values if that change results in a different action having the maximal value.

For instance, if during training the best action was left (with a Q-value of 0.22) and, one training step later, it becomes right (because the right Q-value is now 0.23), the policy changes dramatically: it will now pick right most of the time instead of left.

On the other hand, in policy-gradient methods, stochastic policy action preferences (the probability of taking each action) **change smoothly over time**.

## Disadvantages

Naturally, policy-gradient methods also have some disadvantages:

- **Policy-gradient methods frequently converge to a local maximum instead of a global optimum.**
- Policy-gradient methods progress slowly, **step by step: they can take longer to train (inefficient).**
- Policy-gradient methods can have high variance. We'll see in the actor-critic unit why this happens, and how we can solve this problem.

👉 If you want to go deeper into the advantages and disadvantages of policy-gradient methods, [you can check this video](https://youtu.be/y3oqOjHilio).
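To make the convergence argument concrete, here is a tiny illustrative sketch (not from the course) that contrasts the greedy, value-based choice with a softmax policy over action preferences:

```python
import numpy as np


def greedy_action(q_values):
    return int(np.argmax(q_values))


def softmax_policy(preferences):
    exp = np.exp(preferences - preferences.max())
    return exp / exp.sum()


q_before = np.array([0.22, 0.21])  # "left" (index 0) is best
q_after = np.array([0.22, 0.23])   # a tiny update makes "right" (index 1) best

# The greedy policy flips completely: action 0 -> action 1.
print(greedy_action(q_before), greedy_action(q_after))  # 0 1

# The stochastic policy only shifts slightly: ~[0.502, 0.498] -> ~[0.498, 0.502].
print(softmax_policy(q_before), softmax_policy(q_after))
```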
deep-rl-class/units/en/unit4/advantages-disadvantages.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/advantages-disadvantages.mdx", "repo_id": "deep-rl-class", "token_count": 1184 }
88
# Quiz The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**. ### Q1: Which of the following tools are specifically designed for video games development? <Question choices={[ { text: "Unity (C#)", explain: "", correct: true, }, { text: "Unreal Engine (C++)", explain: "", correct: true, }, { text: "Godot (GDScript, C++, C#)", explain: "", correct: true, }, { text: "JetBrains' Rider", explain: "Although useful for its support of C# for Unity, it's not a video games development IDE", correct: false, }, { text: "JetBrains' CLion", explain: "Although useful for its support of C++ for Unreal Engine, it's not a video games development IDE", correct: false, }, { text: "Microsoft Visual Studio and Visual Studio Code", explain: "Including support for both Unity and Unreal, they are generic IDEs, not video games oriented.", correct: false, }, ]} /> ### Q2: What of the following statements are true about Unity ML-Agents? <Question choices={[ { text: "Unity ´Scene´ objects can be used to create learning environments", explain: "", correct: true, }, { text: "Unit ML-Agents allows you to create and train your agents using Reinforcement Learning", explain: "", correct: true, }, { text: "Its `Communicator` component manages the communication between Unity's C# Environments/Agents and a Python back-end", explain: "", correct: true, }, { text: "The training process uses Reinforcement Learning algorithms, implemented in Pytorch", explain: "", correct: true, }, { text: "Unity ML-Agents only support Proximal Policy Optimization (PPO)", explain: "No, Unity ML-Agents supports several families of algorithms, including Actor-Critic which is going to be explained in the next section", correct: false, }, { text: "It includes a Gym Wrapper and a multi-agent version of it called `PettingZoo`", explain: "", correct: true, }, ]} /> ### Q3: Fill the missing letters - In Unity ML-Agents, the Policy of an Agent is called a b _ _ _ n - The component in charge of orchestrating the agents is called the _ c _ _ _ m _ <details> <summary>Solution</summary> - b r a i n - a c a d e m y </details> ### Q4: Define with your own words what is a `raycast` <details> <summary>Solution</summary> A raycast is (most of the times) a linear projection, as a `laser` which aims to detect collisions through objects. </details> ### Q5: Which are the differences between capturing the environment using `frames` or `raycasts`? <Question choices={[ { text: "By using `frames`, the environment is defined by each of the pixels of the screen. By using `raycasts`, we only send a sample of those pixels.", explain: "`Raycasts` don't have anything to do with pixels. They are linear projections (lasers) that we spawn to look for collisions.", correct: false, }, { text: "By using `raycasts`, the environment is defined by each of the pixels of the screen. By using `frames`, we spawn a (usually) line to check what objects it collides with", explain: "It's the other way around - `frames` collect pixels, `raycasts` check for collisions.", correct: false, }, { text: "By using `frames`, we collect all the pixels of the screen, which define the environment. 
By using `raycasts`, we don't use pixels: we spawn (usually) lines and check what objects they collide with",
    explain: "",
    correct: true,
  },
]}
/>

### Q6: Name several environment and agent input variables used to train the agent in the Snowball or Pyramid environments

<details>
<summary>Solution</summary>

- Collisions of the raycasts spawned from the agent, detecting blocks, (invisible) walls, stones, our target, switches, etc.
- Traditional inputs describing agent features, such as its speed
- Boolean variables, such as the switch (on/off) in Pyramids or the `can I shoot?` in the SnowballTarget.

</details>

Congrats on finishing this Quiz 🥳. If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge.
deep-rl-class/units/en/unit5/quiz.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/quiz.mdx", "repo_id": "deep-rl-class", "token_count": 1511 }
89
# Self-Play: a classic technique to train competitive agents in adversarial games Now that we've studied the basics of multi-agents, we're ready to go deeper. As mentioned in the introduction, we're going **to train agents in an adversarial game with SoccerTwos, a 2vs2 game**. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption> </figure> ## What is Self-Play? Training agents correctly in an adversarial game can be **quite complex**. On the one hand, we need to find how to get a well-trained opponent to play against your training agent. And on the other hand, if you find a very good trained opponent, how will your agent improve its policy when the opponent is too strong? Think of a child that just started to learn soccer. Playing against a very good soccer player will be useless since it will be too hard to win or at least get the ball from time to time. So the child will continuously lose without having time to learn a good policy. The best solution would be **to have an opponent that is on the same level as the agent and will upgrade its level as the agent upgrades its own**. Because if the opponent is too strong, we’ll learn nothing; if it is too weak, we’ll overlearn useless behavior against a stronger opponent then. This solution is called *self-play*. In self-play, **the agent uses former copies of itself (of its policy) as an opponent**. This way, the agent will play against an agent of the same level (challenging but not too much), have opportunities to gradually improve its policy, and then update its opponent as it becomes better. It’s a way to bootstrap an opponent and progressively increase the opponent's complexity. It’s the same way humans learn in competition: - We start to train against an opponent of similar level - Then we learn from it, and when we acquire some skills, we can move further with stronger opponents. We do the same with self-play: - We **start with a copy of our agent as an opponent** this way, this opponent is on a similar level. - We **learn from it** and, when we acquire some skills, we **update our opponent with a more recent copy of our training policy**. The theory behind self-play is not something new. It was already used by Arthur Samuel’s checker player system in the fifties and by Gerald Tesauro’s TD-Gammon in 1995. If you want to learn more about the history of self-play [check out this very good blogpost by Andrew Cohen](https://blog.unity.com/technology/training-intelligent-adversaries-using-self-play-with-ml-agents) ## Self-Play in MLAgents Self-Play is integrated into the MLAgents library and is managed by multiple hyperparameters that we’re going to study. But the main focus, as explained in the documentation, is the **tradeoff between the skill level and generality of the final policy and the stability of learning**. Training against a set of slowly changing or unchanging adversaries with low diversity **results in more stable training. But a risk to overfit if the change is too slow.** So we need to control: - How **often we change opponents** with the `swap_steps` and `team_change` parameters. - The **number of opponents saved** with the `window` parameter. 
A larger value of `window`  means that an agent's pool of opponents will contain a larger diversity of behaviors since it will contain policies from earlier in the training run. - The **probability of playing against the current self vs opponent** sampled from the pool with `play_against_latest_model_ratio`. A larger value of `play_against_latest_model_ratio`  indicates that an agent will be playing against the current opponent more often. - The **number of training steps before saving a new opponent** with `save_steps` parameters. A larger value of `save_steps`  will yield a set of opponents that cover a wider range of skill levels and possibly play styles since the policy receives more training. To get more details about these hyperparameters, you definitely need [to check out this part of the documentation](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Training-Configuration-File.md#self-play) ## The ELO Score to evaluate our agent ### What is ELO Score? In adversarial games, tracking the **cumulative reward is not always a meaningful metric to track the learning progress:** because this metric is **dependent only on the skill of the opponent.** Instead, we’re using an ***ELO rating system*** (named after Arpad Elo) that calculates the **relative skill level** between 2 players from a given population in a zero-sum game. In a zero-sum game: one agent wins, and the other agent loses. It’s a mathematical representation of a situation in which each participant’s gain or loss of utility **is exactly balanced by the gain or loss of the utility of the other participants.** We talk about zero-sum games because the sum of utility is equal to zero. This ELO (starting at a specific score: frequently 1200) can decrease initially but should increase progressively during the training. The Elo system is **inferred from the losses and draws against other players.** It means that player ratings depend **on the ratings of their opponents and the results scored against them.** Elo defines an Elo score that is the relative skills of a player in a zero-sum game. **We say relative because it depends on the performance of opponents.** The central idea is to think of the performance of a player **as a random variable that is normally distributed.** The difference in rating between 2 players serves as **the predictor of the outcomes of a match.** If the player wins, but the probability of winning is high, it will only win a few points from its opponent since it means that it is much stronger than it. After every game: - The winning player takes **points from the losing one.** - The number of points is determined **by the difference in the 2 players ratings (hence relative).** - If the higher-rated player wins → few points will be taken from the lower-rated player. - If the lower-rated player wins → a lot of points will be taken from the high-rated player. - If it’s a draw → the lower-rated player gains a few points from the higher. So if A and B have rating Ra, and Rb, then the **expected scores are** given by: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo1.png" alt="ELO Score"/> Then, at the end of the game, we need to update the player’s actual Elo score. We use a linear adjustment **proportional to the amount by which the player over-performed or under-performed.** We also define a maximum adjustment rating per game: K-factor. - K=16 for master. - K=32 for weaker players. 
If Player A was expected to score \\(E_A\\) points but actually scored \\(S_A\\) points, the player's rating is updated using the formula:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo2.png" alt="ELO Score"/>

### Example

If we take an example:

- Player A has a rating of 2600
- Player B has a rating of 2300

We first calculate the expected scores:

\\(E_{A} = \frac{1}{1+10^{(2300-2600)/400}} = 0.849 \\)

\\(E_{B} = \frac{1}{1+10^{(2600-2300)/400}} = 0.151 \\)

- If the organizers determined that K=16 and A wins, the new ratings would be:

\\(ELO_A = 2600 + 16*(1-0.849) = 2602 \\)

\\(ELO_B = 2300 + 16*(0-0.151) = 2298 \\)

- If the organizers determined that K=16 and B wins, the new ratings would be:

\\(ELO_A = 2600 + 16*(0-0.849) = 2586 \\)

\\(ELO_B = 2300 + 16*(1-0.151) = 2314 \\)

### The Advantages

Using the ELO score has multiple advantages:

- Points are **always balanced** (more points are exchanged when there is an unexpected outcome, but the sum is always the same).
- It is a **self-correcting system**: if a player wins against a weaker player, they will only win a few points.
- It **works with team games**: we calculate the average rating for each team and use it in Elo.

### The Disadvantages

- ELO **does not take into account the individual contribution** of each player in the team.
- Rating deflation: **keeping the same rating requires maintaining the same skill over time**.
- **Ratings from different periods of history can't be directly compared**.
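As a quick sanity check of the worked example above, here is a minimal sketch of the Elo update (illustrative code, not part of ML-Agents):

```python
def expected_score(rating_a, rating_b):
    return 1 / (1 + 10 ** ((rating_b - rating_a) / 400))


def elo_update(rating_a, rating_b, score_a, k=16):
    # score_a is 1 if A wins, 0 if A loses, 0.5 for a draw
    new_a = rating_a + k * (score_a - expected_score(rating_a, rating_b))
    new_b = rating_b + k * ((1 - score_a) - expected_score(rating_b, rating_a))
    return round(new_a), round(new_b)


print(elo_update(2600, 2300, score_a=1))  # A wins -> (2602, 2298)
print(elo_update(2600, 2300, score_a=0))  # B wins -> (2586, 2314)
```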
deep-rl-class/units/en/unit7/self-play.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/self-play.mdx", "repo_id": "deep-rl-class", "token_count": 2245 }
90
# Hands-on [[hands-on]] Now that you've learned to use Optuna, here are some ideas to apply what you've learned: 1️⃣ **Beat your LunarLander-v2 agent results**, by using Optuna to find a better set of hyperparameters. You can also try with another environment, such as MountainCar-v0 and CartPole-v1. 2️⃣ **Beat your SpaceInvaders agent results**. By doing this, you'll see how valuable and powerful Optuna can be in training better agents. Have fun! Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
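If you'd like a starting point for idea 1️⃣, here is a rough sketch of what an Optuna study for LunarLander-v2 could look like (the PPO choice, the hyperparameter ranges and the timestep budget are illustrative assumptions, not a reference solution):

```python
import optuna
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy


def objective(trial):
    # Sample one candidate set of hyperparameters per trial.
    learning_rate = trial.suggest_float("learning_rate", 1e-5, 1e-3, log=True)
    gamma = trial.suggest_float("gamma", 0.95, 0.9999)
    n_steps = trial.suggest_categorical("n_steps", [512, 1024, 2048])

    env = gym.make("LunarLander-v2")
    model = PPO("MlpPolicy", env, learning_rate=learning_rate, gamma=gamma, n_steps=n_steps, verbose=0)
    model.learn(total_timesteps=100_000)  # keep each trial short

    mean_reward, _ = evaluate_policy(model, env, n_eval_episodes=10)
    return mean_reward


study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)
print("Best hyperparameters:", study.best_params)
```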
deep-rl-class/units/en/unitbonus2/hands-on.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus2/hands-on.mdx", "repo_id": "deep-rl-class", "token_count": 207 }
91
import argparse import sys sys.path.append(".") from base_classes import InpaintingBenchmark # noqa: E402 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="runwayml/stable-diffusion-v1-5", choices=[ "runwayml/stable-diffusion-v1-5", "stabilityai/stable-diffusion-2-1", "stabilityai/stable-diffusion-xl-base-1.0", ], ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=50) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() benchmark_pipe = InpaintingBenchmark(args) benchmark_pipe.benchmark(args)
diffusers/benchmarks/benchmark_sd_inpainting.py/0
{ "file_path": "diffusers/benchmarks/benchmark_sd_inpainting.py", "repo_id": "diffusers", "token_count": 362 }
92
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> ### Translating the Diffusers documentation into your language As part of our mission to democratize machine learning, we'd love to make the Diffusers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏. **🗞️ Open an issue** To get started, navigate to the [Issues](https://github.com/huggingface/diffusers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "🌐 Translating a New Language?" from the "New issue" button. Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list. **🍴 Fork the repository** First, you'll need to [fork the Diffusers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page. Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows: ```bash git clone https://github.com/<YOUR-USERNAME>/diffusers.git ``` **📋 Copy-paste the English version with a new language code** The documentation files are in one leading directory: - [`docs/source`](https://github.com/huggingface/diffusers/tree/main/docs/source): All the documentation materials are organized here by language. You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/diffusers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following: ```bash cd ~/path/to/diffusers/docs cp -r source/en source/<LANG-ID> ``` Here, `<LANG-ID>` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table. **✍️ Start translating** The fun part comes - translating the text! The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website. > 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/<LANG-ID>/` directory! The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml): ```yaml - sections: - local: pipeline_tutorial # Do not change this! Use the same name for your .md file title: Pipelines for inference # Translate this! ... title: Tutorials # Translate this! 
``` Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter. > 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/diffusers/issues) and tag @patrickvonplaten.
diffusers/docs/TRANSLATING.md/0
{ "file_path": "diffusers/docs/TRANSLATING.md", "repo_id": "diffusers", "token_count": 1101 }
93
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Tiny AutoEncoder Tiny AutoEncoder for Stable Diffusion (TAESD) was introduced in [madebyollin/taesd](https://github.com/madebyollin/taesd) by Ollin Boer Bohan. It is a tiny distilled version of Stable Diffusion's VAE that can quickly decode the latents in a [`StableDiffusionPipeline`] or [`StableDiffusionXLPipeline`] almost instantly. To use with Stable Diffusion v-2.1: ```python import torch from diffusers import DiffusionPipeline, AutoencoderTiny pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16 ) pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesd", torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "slice of delicious New York-style berry cheesecake" image = pipe(prompt, num_inference_steps=25).images[0] image ``` To use with Stable Diffusion XL 1.0 ```python import torch from diffusers import DiffusionPipeline, AutoencoderTiny pipe = DiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taesdxl", torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "slice of delicious New York-style berry cheesecake" image = pipe(prompt, num_inference_steps=25).images[0] image ``` ## AutoencoderTiny [[autodoc]] AutoencoderTiny ## AutoencoderTinyOutput [[autodoc]] models.autoencoders.autoencoder_tiny.AutoencoderTinyOutput
diffusers/docs/source/en/api/models/autoencoder_tiny.md/0
{ "file_path": "diffusers/docs/source/en/api/models/autoencoder_tiny.md", "repo_id": "diffusers", "token_count": 671 }
94
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Outputs All model outputs are subclasses of [`~utils.BaseOutput`], data structures containing all the information returned by the model. The outputs can also be used as tuples or dictionaries. For example: ```python from diffusers import DDIMPipeline pipeline = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32") outputs = pipeline() ``` The `outputs` object is a [`~pipelines.ImagePipelineOutput`] which means it has an image attribute. You can access each attribute as you normally would or with a keyword lookup, and if that attribute is not returned by the model, you will get `None`: ```python outputs.images outputs["images"] ``` When considering the `outputs` object as a tuple, it only considers the attributes that don't have `None` values. For instance, retrieving an image by indexing into it returns the tuple `(outputs.images)`: ```python outputs[:1] ``` <Tip> To check a specific pipeline or model output, refer to its corresponding API documentation. </Tip> ## BaseOutput [[autodoc]] utils.BaseOutput - to_tuple ## ImagePipelineOutput [[autodoc]] pipelines.ImagePipelineOutput ## FlaxImagePipelineOutput [[autodoc]] pipelines.pipeline_flax_utils.FlaxImagePipelineOutput ## AudioPipelineOutput [[autodoc]] pipelines.AudioPipelineOutput ## ImageTextPipelineOutput [[autodoc]] ImageTextPipelineOutput
diffusers/docs/source/en/api/outputs.md/0
{ "file_path": "diffusers/docs/source/en/api/outputs.md", "repo_id": "diffusers", "token_count": 555 }
95
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DiT [Scalable Diffusion Models with Transformers](https://huggingface.co/papers/2212.09748) (DiT) is by William Peebles and Saining Xie. The abstract from the paper is: *We explore a new class of diffusion models based on the transformer architecture. We train latent diffusion models of images, replacing the commonly-used U-Net backbone with a transformer that operates on latent patches. We analyze the scalability of our Diffusion Transformers (DiTs) through the lens of forward pass complexity as measured by Gflops. We find that DiTs with higher Gflops -- through increased transformer depth/width or increased number of input tokens -- consistently have lower FID. In addition to possessing good scalability properties, our largest DiT-XL/2 models outperform all prior diffusion models on the class-conditional ImageNet 512x512 and 256x256 benchmarks, achieving a state-of-the-art FID of 2.27 on the latter.* The original codebase can be found at [facebookresearch/dit](https://github.com/facebookresearch/dit). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## DiTPipeline [[autodoc]] DiTPipeline - all - __call__ ## ImagePipelineOutput [[autodoc]] pipelines.ImagePipelineOutput
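## Usage example

For quick reference, here is a minimal usage sketch (the checkpoint name and the scheduler swap follow the commonly used DiT example and may need adjusting for your setup):

```python
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

# DiT is class-conditional on ImageNet labels; map label names to class ids.
words = ["white shark", "umbrella"]
class_ids = pipe.get_label_ids(words)

generator = torch.manual_seed(33)
images = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images
images[0]
```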
diffusers/docs/source/en/api/pipelines/dit.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/dit.md", "repo_id": "diffusers", "token_count": 533 }
96
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Shap-E The Shap-E model was proposed in [Shap-E: Generating Conditional 3D Implicit Functions](https://huggingface.co/papers/2305.02463) by Alex Nichol and Heewoo Jun from [OpenAI](https://github.com/openai). The abstract from the paper is: *We present Shap-E, a conditional generative model for 3D assets. Unlike recent work on 3D generative models which produce a single output representation, Shap-E directly generates the parameters of implicit functions that can be rendered as both textured meshes and neural radiance fields. We train Shap-E in two stages: first, we train an encoder that deterministically maps 3D assets into the parameters of an implicit function; second, we train a conditional diffusion model on outputs of the encoder. When trained on a large dataset of paired 3D and text data, our resulting models are capable of generating complex and diverse 3D assets in a matter of seconds. When compared to Point-E, an explicit generative model over point clouds, Shap-E converges faster and reaches comparable or better sample quality despite modeling a higher-dimensional, multi-representation output space.* The original codebase can be found at [openai/shap-e](https://github.com/openai/shap-e). <Tip> See the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## ShapEPipeline [[autodoc]] ShapEPipeline - all - __call__ ## ShapEImg2ImgPipeline [[autodoc]] ShapEImg2ImgPipeline - all - __call__ ## ShapEPipelineOutput [[autodoc]] pipelines.shap_e.pipeline_shap_e.ShapEPipelineOutput
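## Usage example

For quick reference, here is a minimal text-to-3D sketch (the checkpoint name and call arguments are taken from common Shap-E examples and may need adjusting for your setup):

```python
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")

prompt = "a firecracker"
images = pipe(prompt, guidance_scale=15.0, num_inference_steps=64, frame_size=256).images

# Each output is a sequence of rendered frames; turn the first one into a GIF.
export_to_gif(images[0], "firecracker_3d.gif")
```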
diffusers/docs/source/en/api/pipelines/shap_e.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/shap_e.md", "repo_id": "diffusers", "token_count": 595 }
97
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Super-resolution The Stable Diffusion upscaler diffusion model was created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/), and [LAION](https://laion.ai/). It is used to enhance the resolution of input images by a factor of 4. <Tip> Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! If you're interested in using one of the official checkpoints for a task, explore the [CompVis](https://huggingface.co/CompVis), [Runway](https://huggingface.co/runwayml), and [Stability AI](https://huggingface.co/stabilityai) Hub organizations! </Tip> ## StableDiffusionUpscalePipeline [[autodoc]] StableDiffusionUpscalePipeline - all - __call__ - enable_attention_slicing - disable_attention_slicing - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
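## Usage example

For quick reference, here is a minimal usage sketch (the checkpoint and the example image follow the commonly used x4 upscaler example and may need adjusting for your setup):

```python
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipeline = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")

url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
low_res_img = load_image(url).convert("RGB").resize((128, 128))

# The upscaler is conditioned on both the low-resolution image and a text prompt.
upscaled = pipeline(prompt="a white cat", image=low_res_img).images[0]
upscaled.save("upscaled_cat.png")
```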
diffusers/docs/source/en/api/pipelines/stable_diffusion/upscale.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/upscale.md", "repo_id": "diffusers", "token_count": 476 }
98
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Metal Performance Shaders (MPS) 🤗 Diffusers is compatible with Apple silicon (M1/M2 chips) using the PyTorch [`mps`](https://pytorch.org/docs/stable/notes/mps.html) device, which uses the Metal framework to leverage the GPU on MacOS devices. You'll need to have: - macOS computer with Apple silicon (M1/M2) hardware - macOS 12.6 or later (13.0 or later recommended) - arm64 version of Python - [PyTorch 2.0](https://pytorch.org/get-started/locally/) (recommended) or 1.13 (minimum version supported for `mps`) The `mps` backend uses PyTorch's `.to()` interface to move the Stable Diffusion pipeline on to your M1 or M2 device: ```python from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to("mps") # Recommended if your computer has < 64 GB of RAM pipe.enable_attention_slicing() prompt = "a photo of an astronaut riding a horse on mars" image = pipe(prompt).images[0] image ``` <Tip warning={true}> Generating multiple prompts in a batch can [crash](https://github.com/huggingface/diffusers/issues/363) or fail to work reliably. We believe this is related to the [`mps`](https://github.com/pytorch/pytorch/issues/84039) backend in PyTorch. While this is being investigated, you should iterate instead of batching. </Tip> If you're using **PyTorch 1.13**, you need to "prime" the pipeline with an additional one-time pass through it. This is a temporary workaround for an issue where the first inference pass produces slightly different results than subsequent ones. You only need to do this pass once, and after just one inference step you can discard the result. ```diff from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to("mps") pipe.enable_attention_slicing() prompt = "a photo of an astronaut riding a horse on mars" # First-time "warmup" pass if PyTorch version is 1.13 + _ = pipe(prompt, num_inference_steps=1) # Results match those from the CPU device after the warmup pass. image = pipe(prompt).images[0] ``` ## Troubleshoot M1/M2 performance is very sensitive to memory pressure. When this occurs, the system automatically swaps if it needs to which significantly degrades performance. To prevent this from happening, we recommend *attention slicing* to reduce memory pressure during inference and prevent swapping. This is especially relevant if your computer has less than 64GB of system RAM, or if you generate images at non-standard resolutions larger than 512×512 pixels. Call the [`~DiffusionPipeline.enable_attention_slicing`] function on your pipeline: ```py from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("mps") pipeline.enable_attention_slicing() ``` Attention slicing performs the costly attention operation in multiple steps instead of all at once. 
It usually improves performance by ~20% in computers without universal memory, but we've observed *better performance* in most Apple silicon computers unless you have 64GB of RAM or more.
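To tie the recommendations above together, here is a minimal sketch of a loader that falls back to CPU when `mps` is unavailable, enables attention slicing, and only runs the one-time priming pass when PyTorch 1.13 is detected. The helper name and the version check are illustrative, not part of the Diffusers API:

```py
import torch
from diffusers import DiffusionPipeline

def load_sd_on_mps(model_id="runwayml/stable-diffusion-v1-5"):
    # Fall back to CPU if the mps device is unavailable on this machine
    device = "mps" if torch.backends.mps.is_available() else "cpu"
    pipe = DiffusionPipeline.from_pretrained(model_id).to(device)
    # Attention slicing is recommended for systems with < 64 GB of RAM
    pipe.enable_attention_slicing()
    # PyTorch 1.13 needs a one-time "priming" pass; PyTorch 2.0+ does not
    if device == "mps" and torch.__version__.startswith("1.13"):
        _ = pipe("warmup", num_inference_steps=1)
    return pipe

pipe = load_sd_on_mps()
image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```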
diffusers/docs/source/en/optimization/mps.md/0
{ "file_path": "diffusers/docs/source/en/optimization/mps.md", "repo_id": "diffusers", "token_count": 1062 }
99
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # InstructPix2Pix [InstructPix2Pix](https://hf.co/papers/2211.09800) is a Stable Diffusion model trained to edit images from human-provided instructions. For example, your prompt can be "turn the clouds rainy" and the model will edit the input image accordingly. This model is conditioned on the text prompt (or editing instruction) and the input image. This guide will explore the [train_instruct_pix2pix.py](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) training script to help you become familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: ```bash cd examples/instruct_pix2pix pip install -r requirements.txt ``` <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To set up a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but they don't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) and let us know if you have any questions or concerns. </Tip> ## Script parameters The training script has many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L65) function. Default values are provided for most parameters that work pretty well, but you can also set your own values in the training command if you'd like. 
For example, to increase the resolution of the input image: ```bash accelerate launch train_instruct_pix2pix.py \ --resolution=512 \ ``` Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the relevant parameters for InstructPix2Pix: - `--original_image_column`: the original image before the edits are made - `--edited_image_column`: the image after the edits are made - `--edit_prompt_column`: the instructions to edit the image - `--conditioning_dropout_prob`: the dropout probability for the edited image and edit prompts during training which enables classifier-free guidance (CFG) for one or both conditioning inputs ## Training script The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L374) function. This is where you'll make your changes to the training script to adapt it for your own use-case. As with the script parameters, a walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide. Instead, this guide takes a look at the InstructPix2Pix relevant parts of the script. The script begins by modifying the [number of input channels](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L445) in the first convolutional layer of the UNet to account for InstructPix2Pix's additional conditioning image: ```py in_channels = 8 out_channels = unet.conv_in.out_channels unet.register_to_config(in_channels=in_channels) with torch.no_grad(): new_conv_in = nn.Conv2d( in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding ) new_conv_in.weight.zero_() new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight) unet.conv_in = new_conv_in ``` These UNet parameters are [updated](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L545C1-L551C6) by the optimizer: ```py optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) ``` Next, the edited images and edit instructions are [preprocessed](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L624) and [tokenized](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L610C24-L610C24). It is important that the same image transformations are applied to the original and edited images. 
```py def preprocess_train(examples): preprocessed_images = preprocess_images(examples) original_images, edited_images = preprocessed_images.chunk(2) original_images = original_images.reshape(-1, 3, args.resolution, args.resolution) edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution) examples["original_pixel_values"] = original_images examples["edited_pixel_values"] = edited_images captions = list(examples[edit_prompt_column]) examples["input_ids"] = tokenize_captions(captions) return examples ``` Finally, in the [training loop](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L730), it starts by encoding the edited images into latent space: ```py latents = vae.encode(batch["edited_pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor ``` Then, the script applies dropout to the original image and edit instruction embeddings to support CFG. This is what enables the model to modulate the influence of the edit instruction and original image on the edited image. ```py encoder_hidden_states = text_encoder(batch["input_ids"])[0] original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode() if args.conditioning_dropout_prob is not None: random_p = torch.rand(bsz, device=latents.device, generator=generator) prompt_mask = random_p < 2 * args.conditioning_dropout_prob prompt_mask = prompt_mask.reshape(bsz, 1, 1) null_conditioning = text_encoder(tokenize_captions([""]).to(accelerator.device))[0] encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states) image_mask_dtype = original_image_embeds.dtype image_mask = 1 - ( (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype) * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype) ) image_mask = image_mask.reshape(bsz, 1, 1, 1) original_image_embeds = image_mask * original_image_embeds ``` That's pretty much it! Aside from the differences described here, the rest of the script is very similar to the [Text-to-image](text2image#training-script) training script, so feel free to check it out for more details. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script Once you're happy with the changes to your script or if you're okay with the default configuration, you're ready to launch the training script! 🚀 This guide uses the [fusing/instructpix2pix-1000-samples](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) dataset, which is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered). You can also create and use your own dataset if you'd like (see the [Create a dataset for training](create_dataset) guide). Set the `MODEL_NAME` environment variable to the name of the model (can be a model id on the Hub or a path to a local model), and the `DATASET_ID` to the name of the dataset on the Hub. The script creates and saves all the components (feature extractor, scheduler, text encoder, UNet, etc.) to a subfolder in your repository. <Tip> For better results, try longer training runs with a larger dataset. We've only tested this training script on a smaller-scale dataset. 
<br> To monitor training progress with Weights and Biases, add the `--report_to=wandb` parameter to the training command and specify a validation image with `--val_image_url` and a validation prompt with `--validation_prompt`. This can be really useful for debugging the model. </Tip> If you’re training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. ```bash accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$DATASET_ID \ --enable_xformers_memory_efficient_attention \ --resolution=256 \ --random_flip \ --train_batch_size=4 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --checkpointing_steps=5000 \ --checkpoints_total_limit=1 \ --learning_rate=5e-05 \ --max_grad_norm=1 \ --lr_warmup_steps=0 \ --conditioning_dropout_prob=0.05 \ --mixed_precision=fp16 \ --seed=42 \ --push_to_hub ``` After training is finished, you can use your new InstructPix2Pix for inference: ```py import PIL import requests import torch from diffusers import StableDiffusionInstructPix2PixPipeline from diffusers.utils import load_image pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained("your_cool_model", torch_dtype=torch.float16).to("cuda") generator = torch.Generator("cuda").manual_seed(0) image = load_image("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png") prompt = "add some ducks to the lake" num_inference_steps = 20 image_guidance_scale = 1.5 guidance_scale = 10 edited_image = pipeline( prompt, image=image, num_inference_steps=num_inference_steps, image_guidance_scale=image_guidance_scale, guidance_scale=guidance_scale, generator=generator, ).images[0] edited_image.save("edited_image.png") ``` You should experiment with different `num_inference_steps`, `image_guidance_scale`, and `guidance_scale` values to see how they affect inference speed and quality. The guidance scale parameters are especially impactful because they control how much the original image and edit instructions affect the edited image. ## Stable Diffusion XL Stable Diffusion XL (SDXL) is a powerful text-to-image model that generates high-resolution images, and it adds a second text-encoder to its architecture. Use the [`train_instruct_pix2pix_sdxl.py`](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py) script to train a SDXL model to follow image editing instructions. The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide. ## Next steps Congratulations on training your own InstructPix2Pix model! 🥳 To learn more about the model, it may be helpful to: - Read the [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) blog post to learn more about some experiments we've done with InstructPix2Pix, dataset preparation, and results for different instructions.
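To complement the inference example above, here is a minimal, hypothetical sketch of sweeping `image_guidance_scale` with a fixed seed so you can compare how strongly the output stays anchored to the input image. It reuses the same placeholder checkpoint name (`your_cool_model`) from the inference example; swap in your own trained model:

```py
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image, make_image_grid

# "your_cool_model" is a placeholder for the checkpoint trained above
pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "your_cool_model", torch_dtype=torch.float16
).to("cuda")

image = load_image("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png")
prompt = "add some ducks to the lake"

# Higher image_guidance_scale keeps the output closer to the input image;
# higher guidance_scale follows the edit instruction more strongly.
edits = []
for image_guidance_scale in [1.0, 1.5, 2.0]:
    generator = torch.Generator("cuda").manual_seed(0)
    edits.append(
        pipeline(
            prompt,
            image=image,
            num_inference_steps=20,
            image_guidance_scale=image_guidance_scale,
            guidance_scale=10,
            generator=generator,
        ).images[0]
    )
make_image_grid(edits, rows=1, cols=3)
```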
diffusers/docs/source/en/training/instructpix2pix.md/0
{ "file_path": "diffusers/docs/source/en/training/instructpix2pix.md", "repo_id": "diffusers", "token_count": 4161 }
100
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Pipeline callbacks The denoising loop of a pipeline can be modified with custom defined functions using the `callback_on_step_end` parameter. This can be really useful for *dynamically* adjusting certain pipeline attributes, or modifying tensor variables. The flexibility of callbacks opens up some interesting use-cases such as changing the prompt embeddings at each timestep, assigning different weights to the prompt embeddings, and editing the guidance scale. This guide will show you how to use the `callback_on_step_end` parameter to disable classifier-free guidance (CFG) after 40% of the inference steps to save compute with minimal cost to performance. The callback function should have the following arguments: * `pipe` (or the pipeline instance) provides access to useful properties such as `num_timestep` and `guidance_scale`. You can modify these properties by updating the underlying attributes. For this example, you'll disable CFG by setting `pipe._guidance_scale=0.0`. * `step_index` and `timestep` tell you where you are in the denoising loop. Use `step_index` to turn off CFG after reaching 40% of `num_timestep`. * `callback_kwargs` is a dict that contains tensor variables you can modify during the denoising loop. It only includes variables specified in the `callback_on_step_end_tensor_inputs` argument, which is passed to the pipeline's `__call__` method. Different pipelines may use different sets of variables, so please check a pipeline's `_callback_tensor_inputs` attribute for the list of variables you can modify. Some common variables include `latents` and `prompt_embeds`. For this function, change the batch size of `prompt_embeds` after setting `guidance_scale=0.0` in order for it to work properly. Your callback function should look something like this: ```python def callback_dynamic_cfg(pipe, step_index, timestep, callback_kwargs): # adjust the batch_size of prompt_embeds according to guidance_scale if step_index == int(pipe.num_timestep * 0.4): prompt_embeds = callback_kwargs["prompt_embeds"] prompt_embeds = prompt_embeds.chunk(2)[-1] # update guidance_scale and prompt_embeds pipe._guidance_scale = 0.0 callback_kwargs["prompt_embeds"] = prompt_embeds return callback_kwargs ``` Now, you can pass the callback function to the `callback_on_step_end` parameter and the `prompt_embeds` to `callback_on_step_end_tensor_inputs`. 
```py import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "a photo of an astronaut riding a horse on mars" generator = torch.Generator(device="cuda").manual_seed(1) out = pipe(prompt, generator=generator, callback_on_step_end=callback_dynamic_cfg, callback_on_step_end_tensor_inputs=['prompt_embeds']) out.images[0].save("out_custom_cfg.png") ``` The callback function is executed at the end of each denoising step, and modifies the pipeline attributes and tensor variables for the next denoising step. With callbacks, you can implement features such as dynamic CFG without having to modify the underlying code at all! <Tip> 🤗 Diffusers currently only supports `callback_on_step_end`, but feel free to open a [feature request](https://github.com/huggingface/diffusers/issues/new/choose) if you have a cool use-case and require a callback function with a different execution point! </Tip> ## Interrupt the diffusion process Interrupting the diffusion process is particularly useful when building UIs that work with Diffusers because it allows users to stop the generation process if they're unhappy with the intermediate results. You can incorporate this into your pipeline with a callback. <Tip> The interruption callback is supported for text-to-image, image-to-image, and inpainting for the [StableDiffusionPipeline](../api/pipelines/stable_diffusion/overview) and [StableDiffusionXLPipeline](../api/pipelines/stable_diffusion/stable_diffusion_xl). </Tip> This callback function should take the following arguments: `pipe`, `i`, `t`, and `callback_kwargs` (this must be returned). Set the pipeline's `_interrupt` attribute to `True` to stop the diffusion process after a certain number of steps. You are also free to implement your own custom stopping logic inside the callback. In this example, the diffusion process is stopped after 10 steps even though `num_inference_steps` is set to 50. ```python from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe.enable_model_cpu_offload() num_inference_steps = 50 def interrupt_callback(pipe, i, t, callback_kwargs): stop_idx = 10 if i == stop_idx: pipe._interrupt = True return callback_kwargs pipe( "A photo of a cat", num_inference_steps=num_inference_steps, callback_on_step_end=interrupt_callback, ) ```
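As a further illustration of the callback mechanism, below is a minimal sketch of a callback that gradually decays the guidance scale over the course of the denoising loop instead of switching it off. It relies on the same private `_guidance_scale` attribute used in the dynamic CFG example above, so treat it as illustrative rather than a stable API; the function and variable names are ours:

```py
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

num_inference_steps = 50
initial_guidance_scale = 7.5
final_guidance_scale = 2.0  # stay above 1.0 so CFG remains active and prompt_embeds keep their shape

def decay_cfg_callback(pipe, step_index, timestep, callback_kwargs):
    # Linearly interpolate the guidance scale from its initial to its final value
    progress = step_index / num_inference_steps
    pipe._guidance_scale = (1 - progress) * initial_guidance_scale + progress * final_guidance_scale
    return callback_kwargs

image = pipe(
    "a photo of an astronaut riding a horse on mars",
    num_inference_steps=num_inference_steps,
    guidance_scale=initial_guidance_scale,
    callback_on_step_end=decay_cfg_callback,
).images[0]
```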
diffusers/docs/source/en/using-diffusers/callback.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/callback.md", "repo_id": "diffusers", "token_count": 1609 }
101
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Kandinsky [[open-in-colab]] The Kandinsky models are a series of multilingual text-to-image generation models. The Kandinsky 2.0 model uses two multilingual text encoders and concatenates those results for the UNet. [Kandinsky 2.1](../api/pipelines/kandinsky) changes the architecture to include an image prior model ([`CLIP`](https://huggingface.co/docs/transformers/model_doc/clip)) to generate a mapping between text and image embeddings. The mapping provides better text-image alignment and it is used with the text embeddings during training, leading to higher quality results. Finally, Kandinsky 2.1 uses a [Modulating Quantized Vectors (MoVQ)](https://huggingface.co/papers/2209.09002) decoder - which adds a spatial conditional normalization layer to increase photorealism - to decode the latents into images. [Kandinsky 2.2](../api/pipelines/kandinsky_v22) improves on the previous model by replacing the image encoder of the image prior model with a larger CLIP-ViT-G model to improve quality. The image prior model was also retrained on images with different resolutions and aspect ratios to generate higher-resolution images and different image sizes. [Kandinsky 3](../api/pipelines/kandinsky3) simplifies the architecture and shifts away from the two-stage generation process involving the prior model and diffusion model. Instead, Kandinsky 3 uses [Flan-UL2](https://huggingface.co/google/flan-ul2) to encode text, a UNet with [BigGan-deep](https://hf.co/papers/1809.11096) blocks, and [Sber-MoVQGAN](https://github.com/ai-forever/MoVQGAN) to decode the latents into images. Text understanding and generated image quality are primarily achieved by using a larger text encoder and UNet. This guide will show you how to use the Kandinsky models for text-to-image, image-to-image, inpainting, interpolation, and more. Before you begin, make sure you have the following libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install -q diffusers transformers accelerate ``` <Tip warning={true}> Kandinsky 2.1 and 2.2 usage is very similar! The only difference is Kandinsky 2.2 doesn't accept `prompt` as an input when decoding the latents. Instead, Kandinsky 2.2 only accepts `image_embeds` during decoding. <br> Kandinsky 3 has a more concise architecture and it doesn't require a prior model. This means it's usage is identical to other diffusion models like [Stable Diffusion XL](sdxl). </Tip> ## Text-to-image To use the Kandinsky models for any task, you always start by setting up the prior pipeline to encode the prompt and generate the image embeddings. The prior pipeline also generates `negative_image_embeds` that correspond to the negative prompt `""`. For better results, you can pass an actual `negative_prompt` to the prior pipeline, but this'll increase the effective batch size of the prior pipeline by 2x. 
<hfoptions id="text-to-image"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import KandinskyPriorPipeline, KandinskyPipeline import torch prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda") pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16).to("cuda") prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" # optional to include a negative prompt, but results are usually better image_embeds, negative_image_embeds = prior_pipeline(prompt, negative_prompt, guidance_scale=1.0).to_tuple() ``` Now pass all the prompts and embeddings to the [`KandinskyPipeline`] to generate an image: ```py image = pipeline(prompt, image_embeds=image_embeds, negative_prompt=negative_prompt, negative_image_embeds=negative_image_embeds, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/cheeseburger.png"/> </div> </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline import torch prior_pipeline = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16).to("cuda") pipeline = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16).to("cuda") prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" # optional to include a negative prompt, but results are usually better image_embeds, negative_image_embeds = prior_pipeline(prompt, guidance_scale=1.0).to_tuple() ``` Pass the `image_embeds` and `negative_image_embeds` to the [`KandinskyV22Pipeline`] to generate an image: ```py image = pipeline(image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-text-to-image.png"/> </div> </hfoption> <hfoption id="Kandinsky 3"> Kandinsky 3 doesn't require a prior model so you can directly load the [`Kandinsky3Pipeline`] and pass a prompt to generate an image: ```py from diffusers import Kandinsky3Pipeline import torch pipeline = Kandinsky3Pipeline.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" image = pipeline(prompt).images[0] image ``` </hfoption> </hfoptions> 🤗 Diffusers also provides an end-to-end API with the [`KandinskyCombinedPipeline`] and [`KandinskyV22CombinedPipeline`], meaning you don't have to separately load the prior and text-to-image pipeline. The combined pipeline automatically loads both the prior model and the decoder. You can still set different values for the prior pipeline with the `prior_guidance_scale` and `prior_num_inference_steps` parameters if you want. 
Use the [`AutoPipelineForText2Image`] to automatically call the combined pipelines under the hood: <hfoptions id="text-to-image"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, prior_guidance_scale=1.0, guidance_scale=4.0, height=768, width=768).images[0] image ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting" negative_prompt = "low quality, bad quality" image = pipeline(prompt=prompt, negative_prompt=negative_prompt, prior_guidance_scale=1.0, guidance_scale=4.0, height=768, width=768).images[0] image ``` </hfoption> </hfoptions> ## Image-to-image For image-to-image, pass the initial image and text prompt to condition the image to the pipeline. Start by loading the prior pipeline: <hfoptions id="image-to-image"> <hfoption id="Kandinsky 2.1"> ```py import torch from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py import torch from diffusers import KandinskyV22Img2ImgPipeline, KandinskyPriorPipeline prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyV22Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> <hfoption id="Kandinsky 3"> Kandinsky 3 doesn't require a prior model so you can directly load the image-to-image pipeline: ```py from diffusers import Kandinsky3Img2ImgPipeline from diffusers.utils import load_image import torch pipeline = Kandinsky3Img2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() ``` </hfoption> </hfoptions> Download an image to condition on: ```py from diffusers.utils import load_image # download image url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" original_image = load_image(url) original_image = original_image.resize((768, 512)) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"/> </div> Generate the `image_embeds` and `negative_image_embeds` with the prior pipeline: ```py prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" image_embeds, negative_image_embeds = prior_pipeline(prompt, negative_prompt).to_tuple() 
``` Now pass the original image, and all the prompts and embeddings to the pipeline to generate an image: <hfoptions id="image-to-image"> <hfoption id="Kandinsky 2.1"> ```py from diffusers.utils import make_image_grid image = pipeline(prompt, negative_prompt=negative_prompt, image=original_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768, strength=0.3).images[0] make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/img2img_fantasyland.png"/> </div> </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers.utils import make_image_grid image = pipeline(image=original_image, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, height=768, width=768, strength=0.3).images[0] make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-image-to-image.png"/> </div> </hfoption> <hfoption id="Kandinsky 3"> ```py image = pipeline(prompt, negative_prompt=negative_prompt, image=original_image, strength=0.75, num_inference_steps=25).images[0] image ``` </hfoption> </hfoptions> 🤗 Diffusers also provides an end-to-end API with the [`KandinskyImg2ImgCombinedPipeline`] and [`KandinskyV22Img2ImgCombinedPipeline`], meaning you don't have to separately load the prior and image-to-image pipeline. The combined pipeline automatically loads both the prior model and the decoder. You can still set different values for the prior pipeline with the `prior_guidance_scale` and `prior_num_inference_steps` parameters if you want. 
Use the [`AutoPipelineForImage2Image`] to automatically call the combined pipelines under the hood: <hfoptions id="image-to-image"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image import torch pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True) pipeline.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" original_image = load_image(url) original_image.thumbnail((768, 768)) image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=original_image, strength=0.3).images[0] make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2) ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import AutoPipelineForImage2Image from diffusers.utils import make_image_grid, load_image import torch pipeline = AutoPipelineForImage2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16) pipeline.enable_model_cpu_offload() prompt = "A fantasy landscape, Cinematic lighting" negative_prompt = "low quality, bad quality" url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg" original_image = load_image(url) original_image.thumbnail((768, 768)) image = pipeline(prompt=prompt, negative_prompt=negative_prompt, image=original_image, strength=0.3).images[0] make_image_grid([original_image.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2) ``` </hfoption> </hfoptions> ## Inpainting <Tip warning={true}> ⚠️ The Kandinsky models use ⬜️ **white pixels** to represent the masked area now instead of black pixels. If you are using [`KandinskyInpaintPipeline`] in production, you need to change the mask to use white pixels: ```py # For PIL input import PIL.ImageOps mask = PIL.ImageOps.invert(mask) # For PyTorch and NumPy input mask = 1 - mask ``` </Tip> For inpainting, you'll need the original image, a mask of the area to replace in the original image, and a text prompt of what to inpaint. 
Load the prior pipeline: <hfoptions id="inpaint"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline from diffusers.utils import load_image, make_image_grid import torch import numpy as np from PIL import Image prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline from diffusers.utils import load_image, make_image_grid import torch import numpy as np from PIL import Image prior_pipeline = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") pipeline = KandinskyV22InpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16, use_safetensors=True).to("cuda") ``` </hfoption> </hfoptions> Load an initial image and create a mask: ```py init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") mask = np.zeros((768, 768), dtype=np.float32) # mask area above cat's head mask[:250, 250:-250] = 1 ``` Generate the embeddings with the prior pipeline: ```py prompt = "a hat" prior_output = prior_pipeline(prompt) ``` Now pass the initial image, mask, and prompt and embeddings to the pipeline to generate an image: <hfoptions id="inpaint"> <hfoption id="Kandinsky 2.1"> ```py output_image = pipeline(prompt, image=init_image, mask_image=mask, **prior_output, height=768, width=768, num_inference_steps=150).images[0] mask = Image.fromarray((mask*255).astype('uint8'), 'L') make_image_grid([init_image, mask, output_image], rows=1, cols=3) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/inpaint_cat_hat.png"/> </div> </hfoption> <hfoption id="Kandinsky 2.2"> ```py output_image = pipeline(image=init_image, mask_image=mask, **prior_output, height=768, width=768, num_inference_steps=150).images[0] mask = Image.fromarray((mask*255).astype('uint8'), 'L') make_image_grid([init_image, mask, output_image], rows=1, cols=3) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinskyv22-inpaint.png"/> </div> </hfoption> </hfoptions> You can also use the end-to-end [`KandinskyInpaintCombinedPipeline`] and [`KandinskyV22InpaintCombinedPipeline`] to call the prior and decoder pipelines together under the hood. 
Use the [`AutoPipelineForInpainting`] for this: <hfoptions id="inpaint"> <hfoption id="Kandinsky 2.1"> ```py import torch import numpy as np from PIL import Image from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipe = AutoPipelineForInpainting.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") mask = np.zeros((768, 768), dtype=np.float32) # mask area above cat's head mask[:250, 250:-250] = 1 prompt = "a hat" output_image = pipe(prompt=prompt, image=init_image, mask_image=mask).images[0] mask = Image.fromarray((mask*255).astype('uint8'), 'L') make_image_grid([init_image, mask, output_image], rows=1, cols=3) ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py import torch import numpy as np from PIL import Image from diffusers import AutoPipelineForInpainting from diffusers.utils import load_image, make_image_grid pipe = AutoPipelineForInpainting.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() init_image = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") mask = np.zeros((768, 768), dtype=np.float32) # mask area above cat's head mask[:250, 250:-250] = 1 prompt = "a hat" output_image = pipe(prompt=prompt, image=init_image, mask_image=mask).images[0] mask = Image.fromarray((mask*255).astype('uint8'), 'L') make_image_grid([init_image, mask, output_image], rows=1, cols=3) ``` </hfoption> </hfoptions> ## Interpolation Interpolation allows you to explore the latent space between the image and text embeddings which is a cool way to see some of the prior model's intermediate outputs. 
Load the prior pipeline and two images you'd like to interpolate: <hfoptions id="interpolate"> <hfoption id="Kandinsky 2.1"> ```py from diffusers import KandinskyPriorPipeline, KandinskyPipeline from diffusers.utils import load_image, make_image_grid import torch prior_pipeline = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") img_1 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") img_2 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/starry_night.jpeg") make_image_grid([img_1.resize((512,512)), img_2.resize((512,512))], rows=1, cols=2) ``` </hfoption> <hfoption id="Kandinsky 2.2"> ```py from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline from diffusers.utils import load_image, make_image_grid import torch prior_pipeline = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True).to("cuda") img_1 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png") img_2 = load_image("https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/starry_night.jpeg") make_image_grid([img_1.resize((512,512)), img_2.resize((512,512))], rows=1, cols=2) ``` </hfoption> </hfoptions> <div class="flex gap-4"> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"/> <figcaption class="mt-2 text-center text-sm text-gray-500">a cat</figcaption> </div> <div> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/starry_night.jpeg"/> <figcaption class="mt-2 text-center text-sm text-gray-500">Van Gogh's Starry Night painting</figcaption> </div> </div> Specify the text or images to interpolate, and set the weights for each text or image. Experiment with the weights to see how they affect the interpolation! 
```py images_texts = ["a cat", img_1, img_2] weights = [0.3, 0.3, 0.4] ``` Call the `interpolate` function to generate the embeddings, and then pass them to the pipeline to generate the image: <hfoptions id="interpolate"> <hfoption id="Kandinsky 2.1"> ```py # prompt can be left empty prompt = "" prior_out = prior_pipeline.interpolate(images_texts, weights) pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16, use_safetensors=True).to("cuda") image = pipeline(prompt, **prior_out, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinsky-docs/starry_cat.png"/> </div> </hfoption> <hfoption id="Kandinsky 2.2"> ```py # prompt can be left empty prompt = "" prior_out = prior_pipeline.interpolate(images_texts, weights) pipeline = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16, use_safetensors=True).to("cuda") image = pipeline(prompt, **prior_out, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/kandinskyv22-interpolate.png"/> </div> </hfoption> </hfoptions> ## ControlNet <Tip warning={true}> ⚠️ ControlNet is only supported for Kandinsky 2.2! </Tip> ControlNet enables conditioning large pretrained diffusion models with additional inputs such as a depth map or edge detection. For example, you can condition Kandinsky 2.2 with a depth map so the model understands and preserves the structure of the depth image. Let's load an image and extract it's depth map: ```py from diffusers.utils import load_image img = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png" ).resize((768, 768)) img ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png"/> </div> Then you can use the `depth-estimation` [`~transformers.Pipeline`] from 🤗 Transformers to process the image and retrieve the depth map: ```py import torch import numpy as np from transformers import pipeline def make_hint(image, depth_estimator): image = depth_estimator(image)["depth"] image = np.array(image) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) detected_map = torch.from_numpy(image).float() / 255.0 hint = detected_map.permute(2, 0, 1) return hint depth_estimator = pipeline("depth-estimation") hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") ``` ### Text-to-image [[controlnet-text-to-image]] Load the prior pipeline and the [`KandinskyV22ControlnetPipeline`]: ```py from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline prior_pipeline = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") pipeline = KandinskyV22ControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ).to("cuda") ``` Generate the image embeddings from a prompt and negative prompt: ```py prompt = "A robot, 4k photo" negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, 
mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" generator = torch.Generator(device="cuda").manual_seed(43) image_emb, zero_image_emb = prior_pipeline( prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ).to_tuple() ``` Finally, pass the image embeddings and the depth image to the [`KandinskyV22ControlnetPipeline`] to generate an image: ```py image = pipeline(image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, num_inference_steps=50, generator=generator, height=768, width=768).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat_text2img.png"/> </div> ### Image-to-image [[controlnet-image-to-image]] For image-to-image with ControlNet, you'll need to use the: - [`KandinskyV22PriorEmb2EmbPipeline`] to generate the image embeddings from a text prompt and an image - [`KandinskyV22ControlnetImg2ImgPipeline`] to generate an image from the initial image and the image embeddings Process and extract a depth map of an initial image of a cat with the `depth-estimation` [`~transformers.Pipeline`] from 🤗 Transformers: ```py import torch import numpy as np from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline from diffusers.utils import load_image from transformers import pipeline img = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/cat.png" ).resize((768, 768)) def make_hint(image, depth_estimator): image = depth_estimator(image)["depth"] image = np.array(image) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) detected_map = torch.from_numpy(image).float() / 255.0 hint = detected_map.permute(2, 0, 1) return hint depth_estimator = pipeline("depth-estimation") hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda") ``` Load the prior pipeline and the [`KandinskyV22ControlnetImg2ImgPipeline`]: ```py prior_pipeline = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16, use_safetensors=True ).to("cuda") pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ).to("cuda") ``` Pass a text prompt and the initial image to the prior pipeline to generate the image embeddings: ```py prompt = "A robot, 4k photo" negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature" generator = torch.Generator(device="cuda").manual_seed(43) img_emb = prior_pipeline(prompt=prompt, image=img, strength=0.85, generator=generator) negative_emb = prior_pipeline(prompt=negative_prior_prompt, image=img, strength=1, generator=generator) ``` Now you can run the 
[`KandinskyV22ControlnetImg2ImgPipeline`] to generate an image from the initial image and the image embeddings: ```py image = pipeline(image=img, strength=0.5, image_embeds=img_emb.image_embeds, negative_image_embeds=negative_emb.image_embeds, hint=hint, num_inference_steps=50, generator=generator, height=768, width=768).images[0] make_image_grid([img.resize((512, 512)), image.resize((512, 512))], rows=1, cols=2) ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinskyv22/robot_cat.png"/> </div> ## Optimizations Kandinsky is unique because it requires a prior pipeline to generate the mappings, and a second pipeline to decode the latents into an image. Optimization efforts should be focused on the second pipeline because that is where the bulk of the computation is done. Here are some tips to improve Kandinsky during inference. 1. Enable [xFormers](../optimization/xformers) if you're using PyTorch < 2.0: ```diff from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) + pipe.enable_xformers_memory_efficient_attention() ``` 2. Enable `torch.compile` if you're using PyTorch >= 2.0 to automatically use scaled dot-product attention (SDPA): ```diff pipe.unet.to(memory_format=torch.channels_last) + pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) ``` This is the same as explicitly setting the attention processor to use [`~models.attention_processor.AttnAddedKVProcessor2_0`]: ```py from diffusers.models.attention_processor import AttnAddedKVProcessor2_0 pipe.unet.set_attn_processor(AttnAddedKVProcessor2_0()) ``` 3. Offload the model to the CPU with [`~KandinskyPriorPipeline.enable_model_cpu_offload`] to avoid out-of-memory errors: ```diff from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) + pipe.enable_model_cpu_offload() ``` 4. By default, the text-to-image pipeline uses the [`DDIMScheduler`] but you can replace it with another scheduler like [`DDPMScheduler`] to see how that affects the tradeoff between inference speed and image quality: ```py from diffusers import DDPMScheduler from diffusers import DiffusionPipeline scheduler = DDPMScheduler.from_pretrained("kandinsky-community/kandinsky-2-1", subfolder="ddpm_scheduler") pipe = DiffusionPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", scheduler=scheduler, torch_dtype=torch.float16, use_safetensors=True).to("cuda") ```
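To tie the two-stage design and the optimization advice together, here is a minimal sketch (using the same Kandinsky 2.2 checkpoints as above) that runs the cheaper prior pipeline once and reuses its embeddings across several decoder passes, which is where the bulk of the computation happens:

```py
import torch
from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline

prior_pipeline = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipeline = KandinskyV22Pipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

prompt = "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting"

# Run the prior once and reuse its embeddings for several decoder passes
image_embeds, negative_image_embeds = prior_pipeline(prompt, guidance_scale=1.0).to_tuple()

images = []
for seed in range(4):
    generator = torch.Generator("cuda").manual_seed(seed)
    images.append(
        pipeline(
            image_embeds=image_embeds,
            negative_image_embeds=negative_image_embeds,
            height=768,
            width=768,
            generator=generator,
        ).images[0]
    )
```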
diffusers/docs/source/en/using-diffusers/kandinsky.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/kandinsky.md", "repo_id": "diffusers", "token_count": 10811 }
102
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Textual inversion [[open-in-colab]] The [`StableDiffusionPipeline`] supports textual inversion, a technique that enables a model like Stable Diffusion to learn a new concept from just a few sample images. This gives you more control over the generated images and allows you to tailor the model towards specific concepts. You can get started quickly with a collection of community created concepts in the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer). This guide will show you how to run inference with textual inversion using a pre-learned concept from the Stable Diffusion Conceptualizer. If you're interested in teaching a model new concepts with textual inversion, take a look at the [Textual Inversion](../training/text_inversion) training guide. Import the necessary libraries: ```py import torch from diffusers import StableDiffusionPipeline from diffusers.utils import make_image_grid ``` ## Stable Diffusion 1 and 2 Pick a Stable Diffusion checkpoint and a pre-learned concept from the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer): ```py pretrained_model_name_or_path = "runwayml/stable-diffusion-v1-5" repo_id_embeds = "sd-concepts-library/cat-toy" ``` Now you can load a pipeline, and pass the pre-learned concept to it: ```py pipeline = StableDiffusionPipeline.from_pretrained( pretrained_model_name_or_path, torch_dtype=torch.float16, use_safetensors=True ).to("cuda") pipeline.load_textual_inversion(repo_id_embeds) ``` Create a prompt with the pre-learned concept by using the special placeholder token `<cat-toy>`, and choose the number of samples and rows of images you'd like to generate: ```py prompt = "a grafitti in a favela wall with a <cat-toy> on it" num_samples_per_row = 2 num_rows = 2 ``` Then run the pipeline (feel free to adjust the parameters like `num_inference_steps` and `guidance_scale` to see how they affect image quality), save the generated images and visualize them with the helper function you created at the beginning: ```py all_images = [] for _ in range(num_rows): images = pipeline(prompt, num_images_per_prompt=num_samples_per_row, num_inference_steps=50, guidance_scale=7.5).images all_images.extend(images) grid = make_image_grid(all_images, num_rows, num_samples_per_row) grid ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/textual_inversion_inference.png"> </div> ## Stable Diffusion XL Stable Diffusion XL (SDXL) can also use textual inversion vectors for inference. In contrast to Stable Diffusion 1 and 2, SDXL has two text encoders so you'll need two textual inversion embeddings - one for each text encoder model. 
Let's download the SDXL textual inversion embeddings and have a closer look at its structure: ```py from huggingface_hub import hf_hub_download from safetensors.torch import load_file file = hf_hub_download("dn118/unaestheticXL", filename="unaestheticXLv31.safetensors") state_dict = load_file(file) state_dict ``` ``` {'clip_g': tensor([[ 0.0077, -0.0112, 0.0065, ..., 0.0195, 0.0159, 0.0275], ..., [-0.0170, 0.0213, 0.0143, ..., -0.0302, -0.0240, -0.0362]], 'clip_l': tensor([[ 0.0023, 0.0192, 0.0213, ..., -0.0385, 0.0048, -0.0011], ..., [ 0.0475, -0.0508, -0.0145, ..., 0.0070, -0.0089, -0.0163]], ``` There are two tensors, `"clip_g"` and `"clip_l"`. `"clip_g"` corresponds to the bigger text encoder in SDXL and refers to `pipe.text_encoder_2` and `"clip_l"` refers to `pipe.text_encoder`. Now you can load each tensor separately by passing them along with the correct text encoder and tokenizer to [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`]: ```py from diffusers import AutoPipelineForText2Image import torch pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", variant="fp16", torch_dtype=torch.float16) pipe.to("cuda") pipe.load_textual_inversion(state_dict["clip_g"], token="unaestheticXLv31", text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2) pipe.load_textual_inversion(state_dict["clip_l"], token="unaestheticXLv31", text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer) # the embedding should be used as a negative embedding, so we pass it as a negative prompt generator = torch.Generator().manual_seed(33) image = pipe("a woman standing in front of a mountain", negative_prompt="unaestheticXLv31", generator=generator).images[0] image ```
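Going back to the Stable Diffusion 1.5 example, several pre-learned concepts can also be loaded into the same pipeline and combined in a single prompt. The sketch below assumes a second concept repository (`sd-concepts-library/gta5-artwork`) and its `<gta5-artwork>` placeholder token purely for illustration; substitute whichever concepts you actually want to use:

```py
import torch
from diffusers import StableDiffusionPipeline
from diffusers.utils import make_image_grid

pipeline = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")

# Load two pre-learned concepts; the second repo and its token are assumptions for this sketch
pipeline.load_textual_inversion("sd-concepts-library/cat-toy")
pipeline.load_textual_inversion("sd-concepts-library/gta5-artwork")

prompt = "a <cat-toy> in the style of <gta5-artwork>"
images = pipeline(prompt, num_images_per_prompt=2, num_inference_steps=50).images
make_image_grid(images, rows=1, cols=2)
```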
diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/textual_inversion_inference.md", "repo_id": "diffusers", "token_count": 1717 }
103
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Installation Install 🤗 Diffusers for the library you are using. 🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using: - [PyTorch installation instructions](https://pytorch.org/get-started/locally/) - [Flax installation instructions](https://flax.readthedocs.io/en/latest/) ## Install with pip You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you are not familiar with Python virtual environments, take a look at this [guide to installing with pip in virtual environments](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies. Start by creating a virtual environment in your project directory: ```bash python -m venv .env ``` Then activate the virtual environment: ```bash source .env/bin/activate ``` Now you are ready to install 🤗 Diffusers with the following command: **For PyTorch** ```bash pip install diffusers["torch"] ``` **For Flax** ```bash pip install diffusers["flax"] ``` ## Install from source Before installing `diffusers` from source, make sure `torch` and `accelerate` are installed. For `torch` installation, refer to the [torch docs](https://pytorch.org/get-started/locally/#start-locally). Install `accelerate` as follows: ```bash pip install accelerate ``` Then install 🤗 Diffusers from source with the following command: ```bash pip install git+https://github.com/huggingface/diffusers ``` This command installs the bleeding-edge `main` version rather than the latest `stable` version. The `main` version is useful for staying up to date with the latest developments, for instance if a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this also means the `main` version may not always be stable. We strive to keep the `main` version working, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner! ## Editable install You will need an editable install if you'd like to: * Use the `main` version of the source code * Contribute to 🤗 Diffusers (needed to test changes in the code) Clone the repository and install 🤗 Diffusers with the following commands: ```bash git clone https://github.com/huggingface/diffusers.git cd diffusers ``` **For PyTorch** ``` pip install -e ".[torch]" ``` **For Flax** ``` pip install -e ".[flax]" ``` These commands link the folder you cloned the repository to with your Python library paths. Python will now look inside the folder you cloned in addition to the normal library paths. For example, if your Python packages are installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned to. <Tip warning={true}> You must keep the `diffusers` folder if you want to keep using the library. </Tip> Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command: ```bash cd ~/diffusers/ git pull ``` Your Python environment will then find the `main` version of 🤗 Diffusers on the next run. ## Notice on telemetry logging Our library gathers telemetry information remotely during `from_pretrained()` requests. This data includes the version of Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub. This usage data helps us debug issues and prioritize new features. Telemetry is only sent when loading models and pipelines from the Hugging Face Hub, and it is not collected during local usage. We understand that not everyone wants to share additional information, and we respect your privacy, so you can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal: On Linux/MacOS: ```bash export DISABLE_TELEMETRY=YES ``` On Windows: ```bash set DISABLE_TELEMETRY=YES ```
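If you prefer to opt out of telemetry from Python rather than the shell, a minimal sketch is shown below; setting the variable before importing `diffusers` is the safe choice, and printing the module path doubles as a quick sanity check that an editable install is the one Python picks up:

```py
import os

# Equivalent to `export DISABLE_TELEMETRY=YES`, set before diffusers is imported
os.environ["DISABLE_TELEMETRY"] = "YES"

import diffusers

# Confirm which install (pip vs. editable clone) Python is actually using
print(diffusers.__version__)
print(diffusers.__file__)
```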
diffusers/docs/source/ko/installation.md/0
{ "file_path": "diffusers/docs/source/ko/installation.md", "repo_id": "diffusers", "token_count": 3688 }
104
<!--Copyright 2023 Custom Diffusion authors The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 커스텀 Diffusion 학습 예제 [커스텀 Diffusion](https://arxiv.org/abs/2212.04488)은 피사체의 이미지 몇 장(4~5장)만 주어지면 Stable Diffusion처럼 text-to-image 모델을 커스터마이징하는 방법입니다. 'train_custom_diffusion.py' 스크립트는 학습 과정을 구현하고 이를 Stable Diffusion에 맞게 조정하는 방법을 보여줍니다. 이 교육 사례는 [Nupur Kumari](https://nupurkmr9.github.io/)가 제공하였습니다. (Custom Diffusion의 저자 중 한명). ## 로컬에서 PyTorch로 실행하기 ### Dependencies 설치하기 스크립트를 실행하기 전에 라이브러리의 학습 dependencies를 설치해야 합니다: **중요** 예제 스크립트의 최신 버전을 성공적으로 실행하려면 **소스로부터 설치**하는 것을 매우 권장하며, 예제 스크립트를 자주 업데이트하는 만큼 일부 예제별 요구 사항을 설치하고 설치를 최신 상태로 유지하는 것이 좋습니다. 이를 위해 새 가상 환경에서 다음 단계를 실행하세요: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` [example folder](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion)로 cd하여 이동하세요. ``` cd examples/custom_diffusion ``` 이제 실행 ```bash pip install -r requirements.txt pip install clip-retrieval ``` 그리고 [🤗Accelerate](https://github.com/huggingface/accelerate/) 환경을 초기화: ```bash accelerate config ``` 또는 사용자 환경에 대한 질문에 답하지 않고 기본 가속 구성을 사용하려면 다음과 같이 하세요. ```bash accelerate config default ``` 또는 사용 중인 환경이 대화형 셸을 지원하지 않는 경우(예: jupyter notebook) ```python from accelerate.utils import write_basic_config write_basic_config() ``` ### 고양이 예제 😺 이제 데이터셋을 가져옵니다. [여기](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip)에서 데이터셋을 다운로드하고 압축을 풉니다. 직접 데이터셋을 사용하려면 [학습용 데이터셋 생성하기](create_dataset) 가이드를 참고하세요. 또한 'clip-retrieval'을 사용하여 200개의 실제 이미지를 수집하고, regularization으로서 이를 학습 데이터셋의 타겟 이미지와 결합합니다. 이렇게 하면 주어진 타겟 이미지에 대한 과적합을 방지할 수 있습니다. 다음 플래그를 사용하면 `prior_loss_weight=1.`로 `prior_preservation`, `real_prior` regularization을 활성화할 수 있습니다. 클래스_프롬프트`는 대상 이미지와 동일한 카테고리 이름이어야 합니다. 수집된 실제 이미지에는 `class_prompt`와 유사한 텍스트 캡션이 있습니다. 검색된 이미지는 `class_data_dir`에 저장됩니다. 생성된 이미지를 regularization으로 사용하기 위해 `real_prior`를 비활성화할 수 있습니다. 실제 이미지를 수집하려면 훈련 전에 이 명령을 먼저 사용하십시오. ```bash pip install clip-retrieval python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200 ``` **___참고: [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 모델을 사용하는 경우 '해상도'를 768로 변경하세요.___** 스크립트는 모델 체크포인트와 `pytorch_custom_diffusion_weights.bin` 파일을 생성하여 저장소에 저장합니다. 
```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" export INSTANCE_DIR="./data/cat" accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --class_data_dir=./real_reg/samples_cat/ \ --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ --class_prompt="cat" --num_class_images=200 \ --instance_prompt="photo of a <new1> cat" \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=1e-5 \ --lr_warmup_steps=0 \ --max_train_steps=250 \ --scale_lr --hflip \ --modifier_token "<new1>" \ --push_to_hub ``` **더 낮은 VRAM 요구 사항(GPU당 16GB)으로 더 빠르게 훈련하려면 `--enable_xformers_memory_efficient_attention`을 사용하세요. 설치 방법은 [가이드](https://github.com/facebookresearch/xformers)를 따르세요.** 가중치 및 편향(`wandb`)을 사용하여 실험을 추적하고 중간 결과를 저장하려면(강력히 권장합니다) 다음 단계를 따르세요: * `wandb` 설치: `pip install wandb`. * 로그인 : `wandb login`. * 그런 다음 트레이닝을 시작하는 동안 `validation_prompt`를 지정하고 `report_to`를 `wandb`로 설정합니다. 다음과 같은 관련 인수를 구성할 수도 있습니다: * `num_validation_images` * `validation_steps` ```bash accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --class_data_dir=./real_reg/samples_cat/ \ --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ --class_prompt="cat" --num_class_images=200 \ --instance_prompt="photo of a <new1> cat" \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=1e-5 \ --lr_warmup_steps=0 \ --max_train_steps=250 \ --scale_lr --hflip \ --modifier_token "<new1>" \ --validation_prompt="<new1> cat sitting in a bucket" \ --report_to="wandb" \ --push_to_hub ``` 다음은 [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau)의 예시이며, 여러 학습 세부 정보와 함께 중간 결과들을 확인할 수 있습니다. `--push_to_hub`를 지정하면 학습된 파라미터가 허깅 페이스 허브의 리포지토리에 푸시됩니다. 다음은 [예제 리포지토리](https://huggingface.co/sayakpaul/custom-diffusion-cat)입니다. ### 멀티 컨셉에 대한 학습 🐱🪵 [this](https://github.com/ShivamShrirao/diffusers/blob/main/examples/dreambooth/train_dreambooth.py)와 유사하게 각 컨셉에 대한 정보가 포함된 [json](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) 파일을 제공합니다. 실제 이미지를 수집하려면 json 파일의 각 컨셉에 대해 이 명령을 실행합니다. ```bash pip install clip-retrieval python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200 ``` 그럼 우리는 학습시킬 준비가 되었습니다! ```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --output_dir=$OUTPUT_DIR \ --concepts_list=./concept_list.json \ --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=1e-5 \ --lr_warmup_steps=0 \ --max_train_steps=500 \ --num_class_images=200 \ --scale_lr --hflip \ --modifier_token "<new1>+<new2>" \ --push_to_hub ``` 다음은 [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg)의 예시이며, 다른 학습 세부 정보와 함께 중간 결과들을 확인할 수 있습니다. ### 사람 얼굴에 대한 학습 사람 얼굴에 대한 파인튜닝을 위해 다음과 같은 설정이 더 효과적이라는 것을 확인했습니다: `learning_rate=5e-6`, `max_train_steps=1000 to 2000`, `freeze_model=crossattn`을 최소 15~20개의 이미지로 설정합니다. 실제 이미지를 수집하려면 훈련 전에 이 명령을 먼저 사용하십시오. ```bash pip install clip-retrieval python retrieve.py --class_prompt person --class_data_dir real_reg/samples_person --num_class_images 200 ``` 이제 학습을 시작하세요! 
```bash export MODEL_NAME="CompVis/stable-diffusion-v1-4" export OUTPUT_DIR="path-to-save-model" export INSTANCE_DIR="path-to-images" accelerate launch train_custom_diffusion.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --class_data_dir=./real_reg/samples_person/ \ --with_prior_preservation --real_prior --prior_loss_weight=1.0 \ --class_prompt="person" --num_class_images=200 \ --instance_prompt="photo of a <new1> person" \ --resolution=512 \ --train_batch_size=2 \ --learning_rate=5e-6 \ --lr_warmup_steps=0 \ --max_train_steps=1000 \ --scale_lr --hflip --noaug \ --freeze_model crossattn \ --modifier_token "<new1>" \ --enable_xformers_memory_efficient_attention \ --push_to_hub ``` ## 추론 위 프롬프트를 사용하여 모델을 학습시킨 후에는 아래 프롬프트를 사용하여 추론을 실행할 수 있습니다. 프롬프트에 'modifier token'(예: 위 예제에서는 \<new1\>)을 반드시 포함해야 합니다. ```python import torch from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda") pipe.unet.load_attn_procs("path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin") pipe.load_textual_inversion("path-to-save-model", weight_name="<new1>.bin") image = pipe( "<new1> cat sitting in a bucket", num_inference_steps=100, guidance_scale=6.0, eta=1.0, ).images[0] image.save("cat.png") ``` 허브 리포지토리에서 이러한 매개변수를 직접 로드할 수 있습니다: ```python import torch from huggingface_hub.repocard import RepoCard from diffusers import DiffusionPipeline model_id = "sayakpaul/custom-diffusion-cat" card = RepoCard.load(model_id) base_model_id = card.data.to_dict()["base_model"] pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to("cuda") pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin") pipe.load_textual_inversion(model_id, weight_name="<new1>.bin") image = pipe( "<new1> cat sitting in a bucket", num_inference_steps=100, guidance_scale=6.0, eta=1.0, ).images[0] image.save("cat.png") ``` 다음은 여러 컨셉으로 추론을 수행하는 예제입니다: ```python import torch from huggingface_hub.repocard import RepoCard from diffusers import DiffusionPipeline model_id = "sayakpaul/custom-diffusion-cat-wooden-pot" card = RepoCard.load(model_id) base_model_id = card.data.to_dict()["base_model"] pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to("cuda") pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin") pipe.load_textual_inversion(model_id, weight_name="<new1>.bin") pipe.load_textual_inversion(model_id, weight_name="<new2>.bin") image = pipe( "the <new1> cat sculpture in the style of a <new2> wooden pot", num_inference_steps=100, guidance_scale=6.0, eta=1.0, ).images[0] image.save("multi-subject.png") ``` 여기서 '고양이'와 '나무 냄비'는 여러 컨셉을 말합니다. ### 학습된 체크포인트에서 추론하기 `--checkpointing_steps` 인수를 사용한 경우 학습 과정에서 저장된 전체 체크포인트 중 하나에서 추론을 수행할 수도 있습니다. ## Grads를 None으로 설정 더 많은 메모리를 절약하려면 스크립트에 `--set_grads_to_none` 인수를 전달하세요. 이렇게 하면 성적이 0이 아닌 없음으로 설정됩니다. 그러나 특정 동작이 변경되므로 문제가 발생하면 이 인수를 제거하세요. 자세한 정보: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html ## 실험 결과 실험에 대한 자세한 내용은 [당사 웹페이지](https://www.cs.cmu.edu/~custom-diffusion/)를 참조하세요.
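The `--set_grads_to_none` option described above corresponds to a standard PyTorch optimizer feature. The snippet below is purely illustrative (the model and optimizer are placeholders, not objects from the training script):

```python
import torch

# Illustrative only: what `--set_grads_to_none` maps to in plain PyTorch.
model = torch.nn.Linear(4, 4)                      # placeholder model
optimizer = torch.optim.AdamW(model.parameters())  # placeholder optimizer

loss = model(torch.randn(2, 4)).sum()
loss.backward()
optimizer.step()

# Instead of filling .grad with zero tensors, free them entirely.
# This saves memory, but .grad stays None until the next backward pass,
# which is the behavior change the guide above warns about.
optimizer.zero_grad(set_to_none=True)
```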
diffusers/docs/source/ko/training/custom_diffusion.md/0
{ "file_path": "diffusers/docs/source/ko/training/custom_diffusion.md", "repo_id": "diffusers", "token_count": 7053 }
105
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 커스텀 파이프라인 불러오기 [[open-in-colab]] 커뮤니티 파이프라인은 논문에 명시된 원래의 구현체와 다른 형태로 구현된 모든 [`DiffusionPipeline`] 클래스를 의미합니다. (예를 들어, [`StableDiffusionControlNetPipeline`]는 ["Text-to-Image Generation with ControlNet Conditioning"](https://arxiv.org/abs/2302.05543) 해당) 이들은 추가 기능을 제공하거나 파이프라인의 원래 구현을 확장합니다. [Speech to Image](https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image) 또는 [Composable Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#composable-stable-diffusion) 과 같은 멋진 커뮤니티 파이프라인이 많이 있으며 [여기에서](https://github.com/huggingface/diffusers/tree/main/examples/community) 모든 공식 커뮤니티 파이프라인을 찾을 수 있습니다. 허브에서 커뮤니티 파이프라인을 로드하려면, 커뮤니티 파이프라인의 리포지토리 ID와 (파이프라인 가중치 및 구성 요소를 로드하려는) 모델의 리포지토리 ID를 인자로 전달해야 합니다. 예를 들어, 아래 예시에서는 `hf-internal-testing/diffusers-dummy-pipeline`에서 더미 파이프라인을 불러오고, `google/ddpm-cifar10-32`에서 파이프라인의 가중치와 컴포넌트들을 로드합니다. <Tip warning={true}> 🔒 허깅 페이스 허브에서 커뮤니티 파이프라인을 불러오는 것은 곧 해당 코드가 안전하다고 신뢰하는 것입니다. 코드를 자동으로 불러오고 실행하기 앞서 반드시 온라인으로 해당 코드의 신뢰성을 검사하세요! </Tip> ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline" ) ``` 공식 커뮤니티 파이프라인을 불러오는 것은 비슷하지만, 공식 리포지토리 ID에서 가중치를 불러오는 것과 더불어 해당 파이프라인 내의 컴포넌트를 직접 지정하는 것 역시 가능합니다. 아래 예제를 보면 커뮤니티 [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) 파이프라인을 로드할 때, 해당 파이프라인에서 사용할 `clip_model` 컴포넌트와 `feature_extractor` 컴포넌트를 직접 설정하는 것을 확인할 수 있습니다. ```py from diffusers import DiffusionPipeline from transformers import CLIPImageProcessor, CLIPModel clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id) clip_model = CLIPModel.from_pretrained(clip_model_id) pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", custom_pipeline="clip_guided_stable_diffusion", clip_model=clip_model, feature_extractor=feature_extractor, ) ``` 커뮤니티 파이프라인에 대한 자세한 내용은 [커뮤니티 파이프라인](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/custom_pipeline_examples) 가이드를 살펴보세요. 커뮤니티 파이프라인 등록에 관심이 있는 경우 [커뮤니티 파이프라인에 기여하는 방법](https://github.com/huggingface/diffusers/blob/main/docs/source/en/using-diffusers/contribute_pipeline)에 대한 가이드를 확인하세요 !
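Once the CLIP-guided community pipeline above is loaded, it is called like any other pipeline. The snippet below is only a rough sketch: apart from the prompt and `num_inference_steps`, the accepted keyword arguments are defined by the community pipeline itself and may differ between versions:

```python
# Rough usage sketch for the community pipeline loaded above (arguments are assumptions).
pipeline = pipeline.to("cuda")

image = pipeline(
    "a fantasy landscape, trending on artstation",
    num_inference_steps=50,
).images[0]
image.save("clip_guided.png")
```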
diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/custom_pipeline_overview.md", "repo_id": "diffusers", "token_count": 2383 }
106
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 파이프라인, 모델 및 스케줄러 이해하기 [[open-in-colab]] 🧨 Diffusers는 사용자 친화적이며 유연한 도구 상자로, 사용사례에 맞게 diffusion 시스템을 구축 할 수 있도록 설계되었습니다. 이 도구 상자의 핵심은 모델과 스케줄러입니다. [`DiffusionPipeline`]은 편의를 위해 이러한 구성 요소를 번들로 제공하지만, 파이프라인을 분리하고 모델과 스케줄러를 개별적으로 사용해 새로운 diffusion 시스템을 만들 수도 있습니다. 이 튜토리얼에서는 기본 파이프라인부터 시작해 Stable Diffusion 파이프라인까지 진행하며 모델과 스케줄러를 사용해 추론을 위한 diffusion 시스템을 조립하는 방법을 배웁니다. ## 기본 파이프라인 해체하기 파이프라인은 추론을 위해 모델을 실행하는 빠르고 쉬운 방법으로, 이미지를 생성하는 데 코드가 4줄 이상 필요하지 않습니다: ```py >>> from diffusers import DDPMPipeline >>> ddpm = DDPMPipeline.from_pretrained("google/ddpm-cat-256").to("cuda") >>> image = ddpm(num_inference_steps=25).images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ddpm-cat.png" alt="Image of cat created from DDPMPipeline"/> </div> 정말 쉽습니다. 그런데 파이프라인은 어떻게 이렇게 할 수 있었을까요? 파이프라인을 세분화하여 내부에서 어떤 일이 일어나고 있는지 살펴보겠습니다. 위 예시에서 파이프라인에는 [`UNet2DModel`] 모델과 [`DDPMScheduler`]가 포함되어 있습니다. 파이프라인은 원하는 출력 크기의 랜덤 노이즈를 받아 모델을 여러번 통과시켜 이미지의 노이즈를 제거합니다. 각 timestep에서 모델은 *noise residual*을 예측하고 스케줄러는 이를 사용하여 노이즈가 적은 이미지를 예측합니다. 파이프라인은 지정된 추론 스텝수에 도달할 때까지 이 과정을 반복합니다. 모델과 스케줄러를 별도로 사용하여 파이프라인을 다시 생성하기 위해 자체적인 노이즈 제거 프로세스를 작성해 보겠습니다. 1. 모델과 스케줄러를 불러옵니다: ```py >>> from diffusers import DDPMScheduler, UNet2DModel >>> scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256") >>> model = UNet2DModel.from_pretrained("google/ddpm-cat-256").to("cuda") ``` 2. 노이즈 제거 프로세스를 실행할 timestep 수를 설정합니다: ```py >>> scheduler.set_timesteps(50) ``` 3. 스케줄러의 timestep을 설정하면 균등한 간격의 구성 요소를 가진 텐서가 생성됩니다.(이 예시에서는 50개) 각 요소는 모델이 이미지의 노이즈를 제거하는 시간 간격에 해당합니다. 나중에 노이즈 제거 루프를 만들 때 이 텐서를 반복하여 이미지의 노이즈를 제거합니다: ```py >>> scheduler.timesteps tensor([980, 960, 940, 920, 900, 880, 860, 840, 820, 800, 780, 760, 740, 720, 700, 680, 660, 640, 620, 600, 580, 560, 540, 520, 500, 480, 460, 440, 420, 400, 380, 360, 340, 320, 300, 280, 260, 240, 220, 200, 180, 160, 140, 120, 100, 80, 60, 40, 20, 0]) ``` 4. 원하는 출력과 같은 모양을 가진 랜덤 노이즈를 생성합니다: ```py >>> import torch >>> sample_size = model.config.sample_size >>> noise = torch.randn((1, 3, sample_size, sample_size), device="cuda") ``` 5. 이제 timestep을 반복하는 루프를 작성합니다. 각 timestep에서 모델은 [`UNet2DModel.forward`]를 통해 noisy residual을 반환합니다. 스케줄러의 [`~DDPMScheduler.step`] 메서드는 noisy residual, timestep, 그리고 입력을 받아 이전 timestep에서 이미지를 예측합니다. 이 출력은 노이즈 제거 루프의 모델에 대한 다음 입력이 되며, `timesteps` 배열의 끝에 도달할 때까지 반복됩니다. ```py >>> input = noise >>> for t in scheduler.timesteps: ... with torch.no_grad(): ... noisy_residual = model(input, t).sample ... previous_noisy_sample = scheduler.step(noisy_residual, t, input).prev_sample ... input = previous_noisy_sample ``` 이것이 전체 노이즈 제거 프로세스이며, 동일한 패턴을 사용해 모든 diffusion 시스템을 작성할 수 있습니다. 6. 
마지막 단계는 노이즈가 제거된 출력을 이미지로 변환하는 것입니다: ```py >>> from PIL import Image >>> import numpy as np >>> image = (input / 2 + 0.5).clamp(0, 1) >>> image = image.cpu().permute(0, 2, 3, 1).numpy()[0] >>> image = Image.fromarray((image * 255).round().astype("uint8")) >>> image ``` 다음 섹션에서는 여러분의 기술을 시험해보고 좀 더 복잡한 Stable Diffusion 파이프라인을 분석해 보겠습니다. 방법은 거의 동일합니다. 필요한 구성요소들을 초기화하고 timestep수를 설정하여 `timestep` 배열을 생성합니다. 노이즈 제거 루프에서 `timestep` 배열이 사용되며, 이 배열의 각 요소에 대해 모델은 노이즈가 적은 이미지를 예측합니다. 노이즈 제거 루프는 `timestep`을 반복하고 각 timestep에서 noise residual을 출력하고 스케줄러는 이를 사용하여 이전 timestep에서 노이즈가 덜한 이미지를 예측합니다. 이 프로세스는 `timestep` 배열의 끝에 도달할 때까지 반복됩니다. 한번 사용해 봅시다! ## Stable Diffusion 파이프라인 해체하기 Stable Diffusion 은 text-to-image *latent diffusion* 모델입니다. latent diffusion 모델이라고 불리는 이유는 실제 픽셀 공간 대신 이미지의 저차원의 표현으로 작업하기 때문이고, 메모리 효율이 더 높습니다. 인코더는 이미지를 더 작은 표현으로 압축하고, 디코더는 압축된 표현을 다시 이미지로 변환합니다. text-to-image 모델의 경우 텍스트 임베딩을 생성하기 위해 tokenizer와 인코더가 필요합니다. 이전 예제에서 이미 UNet 모델과 스케줄러가 필요하다는 것은 알고 계셨을 것입니다. 보시다시피, 이것은 UNet 모델만 포함된 DDPM 파이프라인보다 더 복잡합니다. Stable Diffusion 모델에는 세 개의 개별 사전학습된 모델이 있습니다. <Tip> 💡 VAE, UNet 및 텍스트 인코더 모델의 작동방식에 대한 자세한 내용은 [How does Stable Diffusion work?](https://huggingface.co/blog/stable_diffusion#how-does-stable-diffusion-work) 블로그를 참조하세요. </Tip> 이제 Stable Diffusion 파이프라인에 필요한 구성요소들이 무엇인지 알았으니, [`~ModelMixin.from_pretrained`] 메서드를 사용해 모든 구성요소를 불러옵니다. 사전학습된 체크포인트 [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)에서 찾을 수 있으며, 각 구성요소들은 별도의 하위 폴더에 저장되어 있습니다: ```py >>> from PIL import Image >>> import torch >>> from transformers import CLIPTextModel, CLIPTokenizer >>> from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler >>> vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae") >>> tokenizer = CLIPTokenizer.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="tokenizer") >>> text_encoder = CLIPTextModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="text_encoder") >>> unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet") ``` 기본 [`PNDMScheduler`] 대신, [`UniPCMultistepScheduler`]로 교체하여 다른 스케줄러를 얼마나 쉽게 연결할 수 있는지 확인합니다: ```py >>> from diffusers import UniPCMultistepScheduler >>> scheduler = UniPCMultistepScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler") ``` 추론 속도를 높이려면 스케줄러와 달리 학습 가능한 가중치가 있으므로 모델을 GPU로 옮기세요: ```py >>> torch_device = "cuda" >>> vae.to(torch_device) >>> text_encoder.to(torch_device) >>> unet.to(torch_device) ``` ### 텍스트 임베딩 생성하기 다음 단계는 임베딩을 생성하기 위해 텍스트를 토큰화하는 것입니다. 이 텍스트는 UNet 모델에서 condition으로 사용되고 입력 프롬프트와 유사한 방향으로 diffusion 프로세스를 조정하는 데 사용됩니다. <Tip> 💡 `guidance_scale` 매개변수는 이미지를 생성할 때 프롬프트에 얼마나 많은 가중치를 부여할지 결정합니다. </Tip> 다른 프롬프트를 생성하고 싶다면 원하는 프롬프트를 자유롭게 선택하세요! ```py >>> prompt = ["a photograph of an astronaut riding a horse"] >>> height = 512 # Stable Diffusion의 기본 높이 >>> width = 512 # Stable Diffusion의 기본 너비 >>> num_inference_steps = 25 # 노이즈 제거 스텝 수 >>> guidance_scale = 7.5 # classifier-free guidance를 위한 scale >>> generator = torch.manual_seed(0) # 초기 잠재 노이즈를 생성하는 seed generator >>> batch_size = len(prompt) ``` 텍스트를 토큰화하고 프롬프트에서 임베딩을 생성합니다: ```py >>> text_input = tokenizer( ... prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt" ... ) >>> with torch.no_grad(): ... text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0] ``` 또한 패딩 토큰의 임베딩인 *unconditional 텍스트 임베딩*을 생성해야 합니다. 
이 임베딩은 조건부 `text_embeddings`과 동일한 shape(`batch_size` 그리고 `seq_length`)을 가져야 합니다: ```py >>> max_length = text_input.input_ids.shape[-1] >>> uncond_input = tokenizer([""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt") >>> uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] ``` 두번의 forward pass를 피하기 위해 conditional 임베딩과 unconditional 임베딩을 배치(batch)로 연결하겠습니다: ```py >>> text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) ``` ### 랜덤 노이즈 생성 그다음 diffusion 프로세스의 시작점으로 초기 랜덤 노이즈를 생성합니다. 이것이 이미지의 잠재적 표현이며 점차적으로 노이즈가 제거됩니다. 이 시점에서 `latent` 이미지는 최종 이미지 크기보다 작지만 나중에 모델이 이를 512x512 이미지 크기로 변환하므로 괜찮습니다. <Tip> 💡 `vae` 모델에는 3개의 다운 샘플링 레이어가 있기 때문에 높이와 너비가 8로 나뉩니다. 다음을 실행하여 확인할 수 있습니다: ```py 2 ** (len(vae.config.block_out_channels) - 1) == 8 ``` </Tip> ```py >>> latents = torch.randn( ... (batch_size, unet.in_channels, height // 8, width // 8), ... generator=generator, ... device=torch_device, ... ) ``` ### 이미지 노이즈 제거 먼저 [`UniPCMultistepScheduler`]와 같은 향상된 스케줄러에 필요한 노이즈 스케일 값인 초기 노이즈 분포 *sigma* 로 입력을 스케일링 하는 것부터 시작합니다: ```py >>> latents = latents * scheduler.init_noise_sigma ``` 마지막 단계는 `latent`의 순수한 노이즈를 점진적으로 프롬프트에 설명된 이미지로 변환하는 노이즈 제거 루프를 생성하는 것입니다. 노이즈 제거 루프는 세 가지 작업을 수행해야 한다는 점을 기억하세요: 1. 노이즈 제거 중에 사용할 스케줄러의 timesteps를 설정합니다. 2. timestep을 따라 반복합니다. 3. 각 timestep에서 UNet 모델을 호출하여 noise residual을 예측하고 스케줄러에 전달하여 이전 노이즈 샘플을 계산합니다. ```py >>> from tqdm.auto import tqdm >>> scheduler.set_timesteps(num_inference_steps) >>> for t in tqdm(scheduler.timesteps): ... # classifier-free guidance를 수행하는 경우 두번의 forward pass를 수행하지 않도록 latent를 확장. ... latent_model_input = torch.cat([latents] * 2) ... latent_model_input = scheduler.scale_model_input(latent_model_input, timestep=t) ... # noise residual 예측 ... with torch.no_grad(): ... noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample ... # guidance 수행 ... noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) ... noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) ... # 이전 노이즈 샘플을 계산 x_t -> x_t-1 ... latents = scheduler.step(noise_pred, t, latents).prev_sample ``` ### 이미지 디코딩 마지막 단계는 `vae`를 이용하여 잠재 표현을 이미지로 디코딩하고 `sample`과 함께 디코딩된 출력을 얻는 것입니다: ```py # latent를 스케일링하고 vae로 이미지 디코딩 latents = 1 / 0.18215 * latents with torch.no_grad(): image = vae.decode(latents).sample ``` 마지막으로 이미지를 `PIL.Image`로 변환하면 생성된 이미지를 확인할 수 있습니다! ```py >>> image = (image / 2 + 0.5).clamp(0, 1) >>> image = image.detach().cpu().permute(0, 2, 3, 1).numpy() >>> images = (image * 255).round().astype("uint8") >>> pil_images = [Image.fromarray(image) for image in images] >>> pil_images[0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/blog/assets/98_stable_diffusion/stable_diffusion_k_lms.png"/> </div> ## 다음 단계 기본 파이프라인부터 복잡한 파이프라인까지, 자신만의 diffusion 시스템을 작성하는 데 필요한 것은 노이즈 제거 루프뿐이라는 것을 알 수 있었습니다. 이 루프는 스케줄러의 timesteps를 설정하고, 이를 반복하며, UNet 모델을 호출하여 noise residual을 예측하고 스케줄러에 전달하여 이전 노이즈 샘플을 계산하는 과정을 번갈아 가며 수행해야 합니다. 이것이 바로 🧨 Diffusers가 설계된 목적입니다: 모델과 스케줄러를 사용해 자신만의 diffusion 시스템을 직관적이고 쉽게 작성할 수 있도록 하기 위해서입니다. 다음 단계를 자유롭게 진행하세요: * 🧨 Diffusers에 [파이프라인 구축 및 기여](using-diffusers/#contribute_pipeline)하는 방법을 알아보세요. 여러분이 어떤 아이디어를 내놓을지 기대됩니다! * 라이브러리에서 [기본 파이프라인](./api/pipelines/overview)을 살펴보고, 모델과 스케줄러를 별도로 사용하여 파이프라인을 처음부터 해체하고 빌드할 수 있는지 확인해 보세요.
diffusers/docs/source/ko/using-diffusers/write_own_pipeline.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/write_own_pipeline.md", "repo_id": "diffusers", "token_count": 9949 }
107
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput BITS = 8 # convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py def decimal_to_bits(x, bits=BITS): """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1""" device = x.device x = (x * 255).int().clamp(0, 255) mask = 2 ** torch.arange(bits - 1, -1, -1, device=device) mask = rearrange(mask, "d -> d 1 1") x = rearrange(x, "b c h w -> b c 1 h w") bits = ((x & mask) != 0).float() bits = rearrange(bits, "b c d h w -> b (c d) h w") bits = bits * 2 - 1 return bits def bits_to_decimal(x, bits=BITS): """expects bits from -1 to 1, outputs image tensor from 0 to 1""" device = x.device x = (x > 0).int() mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32) mask = rearrange(mask, "d -> d 1 1") x = rearrange(x, "b (c d) h w -> b c d h w", d=8) dec = reduce(x * mask, "b c d h w -> b c h w", "sum") return (dec / 255).clamp(0.0, 1.0) # modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale def ddim_bit_scheduler_step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True, ) -> Union[DDIMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): TODO generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) # 4. Clip "predicted x_0" scale = self.bit_scale if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -scale, scale) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = self._get_variance(timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 device = model_output.device if torch.is_tensor(model_output) else "cpu" noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device) variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise prev_sample = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) def ddpm_bit_scheduler_step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prediction_type="epsilon", generator=None, return_dict: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. prediction_type (`str`, default `epsilon`): indicates whether the model predicts the noise (epsilon), or the samples (`sample`). generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ t = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif prediction_type == "sample": pred_original_sample = model_output else: raise ValueError(f"Unsupported prediction_type {prediction_type}.") # 3. Clip "predicted x_0" scale = self.bit_scale if self.config.clip_sample: pred_original_sample = torch.clamp(pred_original_sample, -scale, scale) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise variance = 0 if t > 0: noise = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator ).to(model_output.device) variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise pred_prev_sample = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) class BitDiffusion(DiffusionPipeline): def __init__( self, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], bit_scale: Optional[float] = 1.0, ): super().__init__() self.bit_scale = bit_scale self.scheduler.step = ( ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step ) self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, height: Optional[int] = 256, width: Optional[int] = 256, num_inference_steps: Optional[int] = 50, generator: Optional[torch.Generator] = None, batch_size: Optional[int] = 1, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[Tuple, ImagePipelineOutput]: latents = torch.randn( (batch_size, self.unet.config.in_channels, height, width), generator=generator, ) latents = decimal_to_bits(latents) * self.bit_scale latents = latents.to(self.device) self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): # predict the noise residual noise_pred = self.unet(latents, t).sample # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents).prev_sample image = bits_to_decimal(latents) if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,) return ImagePipelineOutput(images=image)
diffusers/examples/community/bit_diffusion.py/0
{ "file_path": "diffusers/examples/community/bit_diffusion.py", "repo_id": "diffusers", "token_count": 4362 }
108
# Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import PIL.Image import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, ConfigMixin, DiffusionPipeline, SchedulerMixin, UNet2DConditionModel, logging from diffusers.configuration_utils import register_to_config from diffusers.image_processor import PipelineImageInput, VaeImageProcessor from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import BaseOutput from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline): _optional_components = ["scheduler"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: "LCMSchedulerWithTimestamp", safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() scheduler = ( scheduler if scheduler is not None else LCMSchedulerWithTimestamp( beta_start=0.00085, beta_end=0.0120, beta_schedule="scaled_linear", prediction_type="epsilon" ) ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) def _encode_prompt( self, prompt, device, num_images_per_prompt, prompt_embeds: None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. 
""" if prompt is not None and isinstance(prompt, str): pass elif prompt is not None and isinstance(prompt, list): len(prompt) else: prompt_embeds.shape[0] if prompt_embeds is None: text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # Don't need to get uncond prompt embedding because of LCM Guided Distillation return prompt_embeds def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept def prepare_latents( self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, latents=None, generator=None, ): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): raise ValueError( f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" ) image = image.to(device=device, dtype=dtype) # batch_size = batch_size * num_images_per_prompt if image.shape[1] == 4: init_latents = image else: if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) elif isinstance(generator, list): init_latents = [ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) ] init_latents = torch.cat(init_latents, dim=0) else: init_latents = self.vae.encode(image).latent_dist.sample(generator) init_latents = self.vae.config.scaling_factor * init_latents if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: # expand init_latents for batch_size ( f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial" " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" " your script to pass as many initial images as text prompts to suppress this warning." ) # deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) additional_image_per_prompt = batch_size // init_latents.shape[0] init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0) elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0: raise ValueError( f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." ) else: init_latents = torch.cat([init_latents], dim=0) shape = init_latents.shape noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) # get latents init_latents = self.scheduler.add_noise(init_latents, noise, timestep) latents = init_latents return latents if latents is None: latents = torch.randn(shape, dtype=dtype).to(device) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps: torch.Tensor: generate embedding vectors at these timesteps embedding_dim: int: dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb def get_timesteps(self, num_inference_steps, strength, device): # get the original timestep using init_timestep init_timestep = min(int(num_inference_steps * strength), num_inference_steps) t_start = max(num_inference_steps - init_timestep, 0) timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] return timesteps, num_inference_steps - t_start @torch.no_grad() def __call__( self, prompt: Union[str, List[str]] = None, image: PipelineImageInput = None, strength: float = 0.8, height: Optional[int] = 768, width: Optional[int] = 768, guidance_scale: float = 7.5, num_images_per_prompt: Optional[int] = 1, latents: Optional[torch.FloatTensor] = None, num_inference_steps: int = 4, lcm_origin_steps: int = 50, prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: 
Optional[Dict[str, Any]] = None, ): # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 2. Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG) # 3. Encode input prompt prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, prompt_embeds=prompt_embeds, ) # 3.5 encode image image = self.image_processor.preprocess(image) # 4. Prepare timesteps self.scheduler.set_timesteps(strength, num_inference_steps, lcm_origin_steps) # timesteps = self.scheduler.timesteps # timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device) timesteps = self.scheduler.timesteps latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) print("timesteps: ", timesteps) # 5. Prepare latent variable num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( image, latent_timestep, batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, latents, ) bs = batch_size * num_images_per_prompt # 6. Get Guidance Scale Embedding w = torch.tensor(guidance_scale).repeat(bs) w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype) # 7. LCM MultiStep Sampling Loop: with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): ts = torch.full((bs,), t, device=device, dtype=torch.long) latents = latents.to(prompt_embeds.dtype) # model prediction (v-prediction, eps, x) model_pred = self.unet( latents, ts, timestep_cond=w_embedding, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # compute the previous noisy sample x_t -> x_t-1 latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False) # # call the callback, if provided # if i == len(timesteps) - 1: progress_bar.update() denoised = denoised.to(prompt_embeds.dtype) if not output_type == "latent": image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = denoised has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class LCMSchedulerOutput(BaseOutput): """ Output class for the scheduler's `step` function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the denoising loop. 
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample `(x_{0})` based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor denoised: Optional[torch.FloatTensor] = None # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar def betas_for_alpha_bar( num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. Choose from `cosine` or `exp` Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ if alpha_transform_type == "cosine": def alpha_bar_fn(t): return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(t): return math.exp(t * -12.0) else: raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) def rescale_zero_terminal_snr(betas): """ Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1) Args: betas (`torch.FloatTensor`): the betas that the scheduler is being initialized with. Returns: `torch.FloatTensor`: rescaled betas with zero terminal SNR """ # Convert betas to alphas_bar_sqrt alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_bar_sqrt = alphas_cumprod.sqrt() # Store old values. alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() # Shift so the last timestep is zero. alphas_bar_sqrt -= alphas_bar_sqrt_T # Scale so the first timestep is back to the old value. alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) # Convert alphas_bar_sqrt to betas alphas_bar = alphas_bar_sqrt**2 # Revert sqrt alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod alphas = torch.cat([alphas_bar[0:1], alphas]) betas = 1 - alphas return betas class LCMSchedulerWithTimestamp(SchedulerMixin, ConfigMixin): """ This class modifies LCMScheduler to add a timestamp argument to set_timesteps `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with non-Markovian guidance. This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic methods the library implements for all schedulers such as loading and saving. Args: num_train_timesteps (`int`, defaults to 1000): The number of diffusion steps to train the model. beta_start (`float`, defaults to 0.0001): The starting `beta` value of inference. beta_end (`float`, defaults to 0.02): The final `beta` value. 
beta_schedule (`str`, defaults to `"linear"`): The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, *optional*): Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`. clip_sample (`bool`, defaults to `True`): Clip the predicted sample for numerical stability. clip_sample_range (`float`, defaults to 1.0): The maximum magnitude for sample clipping. Valid only when `clip_sample=True`. set_alpha_to_one (`bool`, defaults to `True`): Each diffusion step uses the alphas product value at that step and at the previous one. For the final step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`, otherwise it uses the alpha value at step 0. steps_offset (`int`, defaults to 0): An offset added to the inference steps. You can use a combination of `offset=1` and `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable Diffusion. prediction_type (`str`, defaults to `epsilon`, *optional*): Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process), `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen Video](https://imagen.research.google/video/paper.pdf) paper). thresholding (`bool`, defaults to `False`): Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such as Stable Diffusion. dynamic_thresholding_ratio (`float`, defaults to 0.995): The ratio for the dynamic thresholding method. Valid only when `thresholding=True`. sample_max_value (`float`, defaults to 1.0): The threshold value for dynamic thresholding. Valid only when `thresholding=True`. timestep_spacing (`str`, defaults to `"leading"`): The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. rescale_betas_zero_snr (`bool`, defaults to `False`): Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and dark samples instead of limiting it to samples with medium brightness. Loosely related to [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506). """ # _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, clip_sample: bool = True, set_alpha_to_one: bool = True, steps_offset: int = 0, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, clip_sample_range: float = 1.0, sample_max_value: float = 1.0, timestep_spacing: str = "leading", rescale_betas_zero_snr: bool = False, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") # Rescale for zero SNR if rescale_betas_zero_snr: self.betas = rescale_zero_terminal_snr(self.betas) self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) # At every step in ddim, we are looking into the previous alphas_cumprod # For the final step, there is no previous alphas_cumprod because we are already at 0 # `set_alpha_to_one` decides whether we set this parameter simply to one or # whether we use the final alpha of the "non-previous" one. self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0] # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)) def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. Args: sample (`torch.FloatTensor`): The input sample. timestep (`int`, *optional*): The current timestep in the diffusion chain. Returns: `torch.FloatTensor`: A scaled input sample. """ return sample def _get_variance(self, timestep, prev_timestep): alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) return variance # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: """ "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing pixels from saturation at each step. We find that dynamic thresholding results in significantly better photorealism as well as better image-text alignment, especially when using very large guidance weights." 
https://arxiv.org/abs/2205.11487 """ dtype = sample.dtype batch_size, channels, height, width = sample.shape if dtype not in (torch.float32, torch.float64): sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half # Flatten sample for doing quantile calculation along each image sample = sample.reshape(batch_size, channels * height * width) abs_sample = sample.abs() # "a certain percentile absolute pixel value" s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1) s = torch.clamp( s, min=1, max=self.config.sample_max_value ) # When clamped to min=1, equivalent to standard clipping to [-1, 1] s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0 sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s" sample = sample.reshape(batch_size, channels, height, width) sample = sample.to(dtype) return sample def set_timesteps( self, strength, num_inference_steps: int, lcm_origin_steps: int, device: Union[str, torch.device] = None ): """ Sets the discrete timesteps used for the diffusion chain (to be run before inference). Args: num_inference_steps (`int`): The number of diffusion steps used when generating samples with a pre-trained model. """ if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) self.num_inference_steps = num_inference_steps # LCM Timesteps Setting: # Linear Spacing c = self.config.num_train_timesteps // lcm_origin_steps lcm_origin_timesteps = ( np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1 ) # LCM Training Steps Schedule skipping_step = len(lcm_origin_timesteps) // num_inference_steps timesteps = lcm_origin_timesteps[::-skipping_step][:num_inference_steps] # LCM Inference Steps Schedule self.timesteps = torch.from_numpy(timesteps.copy()).to(device) def get_scalings_for_boundary_condition_discrete(self, t): self.sigma_data = 0.5 # Default: 0.5 # By dividing 0.1: This is almost a delta function at t=0. c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2) c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5 return c_skip, c_out def step( self, model_output: torch.FloatTensor, timeindex: int, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, generator=None, variance_noise: Optional[torch.FloatTensor] = None, return_dict: bool = True, ) -> Union[LCMSchedulerOutput, Tuple]: """ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): The direct output from learned diffusion model. timestep (`float`): The current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): A current instance of a sample created by the diffusion process. eta (`float`): The weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`, defaults to `False`): If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`.
If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` has no effect. generator (`torch.Generator`, *optional*): A random number generator. variance_noise (`torch.FloatTensor`): Alternative to generating noise with `generator` by directly providing the noise for the variance itself. Useful for methods such as [`CycleDiffusion`]. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`. Returns: [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`: If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a tuple is returned where the first element is the sample tensor. """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # 1. get previous step value prev_timeindex = timeindex + 1 if prev_timeindex < len(self.timesteps): prev_timestep = self.timesteps[prev_timeindex] else: prev_timestep = timestep # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod[timestep] alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev # 3. Get scalings for boundary conditions c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep) # 4. Different Parameterization: parameterization = self.config.prediction_type if parameterization == "epsilon": # noise-prediction pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt() elif parameterization == "sample": # x-prediction pred_x0 = model_output elif parameterization == "v_prediction": # v-prediction pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output # 4. Denoise model output using boundary conditions denoised = c_out * pred_x0 + c_skip * sample # 5. Sample z ~ N(0, I), For MultiStep Inference # Noise is not used for one-step sampling. 
if len(self.timesteps) > 1: noise = torch.randn(model_output.shape).to(model_output.device) prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise else: prev_sample = denoised if not return_dict: return (prev_sample, denoised) return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised) # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise def add_noise( self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor, ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as original_samples alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype) timesteps = timesteps.to(original_samples.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(original_samples.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity def get_velocity( self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor ) -> torch.FloatTensor: # Make sure alphas_cumprod and timestep have same device and dtype as sample alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype) timesteps = timesteps.to(sample.device) sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5 sqrt_alpha_prod = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape) < len(sample.shape): sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1) sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5 sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape): sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1) velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity def __len__(self): return self.config.num_train_timesteps
diffusers/examples/community/latent_consistency_img2img.py/0
{ "file_path": "diffusers/examples/community/latent_consistency_img2img.py", "repo_id": "diffusers", "token_count": 16205 }
109
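Taken on its own, the `step` method of the LCM scheduler in `latent_consistency_img2img.py` above reduces to two operations: blend the predicted x_0 with the current sample using the consistency-model boundary scalings, then (for multi-step sampling) re-noise the estimate toward the previous timestep. The following self-contained sketch reproduces that arithmetic with dummy tensors; the constants (`sigma_data = 0.5`, the division by `0.1`) are taken from the scheduler code above, while the timestep and alpha values are arbitrary illustrative choices.

import torch

# Boundary-condition scalings, mirroring get_scalings_for_boundary_condition_discrete above.
def boundary_scalings(t, sigma_data=0.5):
    c_skip = sigma_data**2 / ((t / 0.1) ** 2 + sigma_data**2)
    c_out = (t / 0.1) / ((t / 0.1) ** 2 + sigma_data**2) ** 0.5
    return c_skip, c_out

t = torch.tensor(499.0)                 # current discrete timestep (illustrative)
alpha_prod_t_prev = torch.tensor(0.7)   # cumulative alpha at the previous timestep (illustrative)

c_skip, c_out = boundary_scalings(t)
sample = torch.randn(1, 4, 64, 64)      # current noisy latents
pred_x0 = torch.randn(1, 4, 64, 64)     # model's estimate of x_0

# Consistency-model output: mostly pred_x0, with a small skip connection to the sample.
denoised = c_out * pred_x0 + c_skip * sample

# Multi-step LCM inference re-noises the denoised estimate toward the previous timestep;
# single-step inference would simply return `denoised`.
noise = torch.randn_like(denoised)
prev_sample = alpha_prod_t_prev.sqrt() * denoised + (1 - alpha_prod_t_prev).sqrt() * noise
print(c_skip.item(), c_out.item(), prev_sample.shape)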
import inspect import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Tuple, Union import matplotlib.pyplot as plt import torch import torch.nn.functional as F from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import ( FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin, ) from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.attention_processor import ( AttnProcessor2_0, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor, XFormersAttnProcessor, ) from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import ( is_accelerate_available, is_accelerate_version, is_invisible_watermark_available, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor if is_invisible_watermark_available(): from diffusers.pipelines.stable_diffusion_xl.watermark import ( StableDiffusionXLWatermarker, ) logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> from diffusers import StableDiffusionXLPipeline >>> pipe = StableDiffusionXLPipeline.from_pretrained( ... "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to("cuda") >>> prompt = "a photo of an astronaut riding a horse on mars" >>> image = pipe(prompt).images[0] ``` """ def gaussian_kernel(kernel_size=3, sigma=1.0, channels=3): x_coord = torch.arange(kernel_size) gaussian_1d = torch.exp(-((x_coord - (kernel_size - 1) / 2) ** 2) / (2 * sigma**2)) gaussian_1d = gaussian_1d / gaussian_1d.sum() gaussian_2d = gaussian_1d[:, None] * gaussian_1d[None, :] kernel = gaussian_2d[None, None, :, :].repeat(channels, 1, 1, 1) return kernel def gaussian_filter(latents, kernel_size=3, sigma=1.0): channels = latents.shape[1] kernel = gaussian_kernel(kernel_size, sigma, channels).to(latents.device, latents.dtype) blurred_latents = F.conv2d(latents, kernel, padding=kernel_size // 2, groups=channels) return blurred_latents # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 """ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg return noise_cfg class DemoFusionSDXLPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin): r""" Pipeline for text-to-image generation using Stable Diffusion XL. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) 
In addition the pipeline inherits the following loading methods: - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`] - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`] as well as the following saving methods: - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`] Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion XL uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. text_encoder_2 ([` CLIPTextModelWithProjection`]): Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection), specifically the [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). tokenizer_2 (`CLIPTokenizer`): Second Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `"True"`): Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of `stabilityai/stable-diffusion-xl-base-1-0`. add_watermarker (`bool`, *optional*): Whether to use the [invisible_watermark library](https://github.com/ShieldMnt/invisible-watermark/) to watermark output images. If not defined, it will default to True if the package is installed, otherwise no watermarker will be used. """ model_cpu_offload_seq = "text_encoder->text_encoder_2->unet->vae" def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, text_encoder_2: CLIPTextModelWithProjection, tokenizer: CLIPTokenizer, tokenizer_2: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, force_zeros_for_empty_prompt: bool = True, add_watermarker: Optional[bool] = None, ): super().__init__() self.register_modules( vae=vae, text_encoder=text_encoder, text_encoder_2=text_encoder_2, tokenizer=tokenizer, tokenizer_2=tokenizer_2, unet=unet, scheduler=scheduler, ) self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.default_sample_size = self.unet.config.sample_size add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available() if add_watermarker: self.watermark = StableDiffusionXLWatermarker() else: self.watermark = None # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing def enable_vae_slicing(self): r""" Enable sliced VAE decoding. 
When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. """ self.vae.enable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing def disable_vae_slicing(self): r""" Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_slicing() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling def enable_vae_tiling(self): r""" Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow processing larger images. """ self.vae.enable_tiling() # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling def disable_vae_tiling(self): r""" Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to computing decoding in one step. """ self.vae.disable_tiling() def encode_prompt( self, prompt: str, prompt_2: Optional[str] = None, device: Optional[torch.device] = None, num_images_per_prompt: int = 1, do_classifier_free_guidance: bool = True, negative_prompt: Optional[str] = None, negative_prompt_2: Optional[str] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled text embeddings will be generated from `prompt` input argument. 
negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. """ device = device or self._execution_device # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) adjust_lora_scale_text_encoder(self.text_encoder_2, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] # Define tokenizers and text encoders tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2] text_encoders = ( [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2] ) if prompt_embeds is None: prompt_2 = prompt_2 or prompt # textual inversion: procecss multi-vector tokens if necessary prompt_embeds_list = [] prompts = [prompt, prompt_2] for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, tokenizer) text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {tokenizer.model_max_length} tokens: {removed_text}" ) prompt_embeds = text_encoder( text_input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder pooled_prompt_embeds = prompt_embeds[0] prompt_embeds = prompt_embeds.hidden_states[-2] prompt_embeds_list.append(prompt_embeds) prompt_embeds = torch.concat(prompt_embeds_list, dim=-1) # get unconditional embeddings for classifier free guidance zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt: negative_prompt_embeds = torch.zeros_like(prompt_embeds) negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) elif do_classifier_free_guidance and negative_prompt_embeds is None: negative_prompt = negative_prompt or "" negative_prompt_2 = negative_prompt_2 or negative_prompt uncond_tokens: List[str] if prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." 
) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt, negative_prompt_2] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = [negative_prompt, negative_prompt_2] negative_prompt_embeds_list = [] for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders): if isinstance(self, TextualInversionLoaderMixin): negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer) max_length = prompt_embeds.shape[1] uncond_input = tokenizer( negative_prompt, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) negative_prompt_embeds = text_encoder( uncond_input.input_ids.to(device), output_hidden_states=True, ) # We are only ALWAYS interested in the pooled output of the final text encoder negative_pooled_prompt_embeds = negative_prompt_embeds[0] negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2] negative_prompt_embeds_list.append(negative_prompt_embeds) negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1) prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) if do_classifier_free_guidance: negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view( bs_embed * num_images_per_prompt, -1 ) return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs( self, prompt, prompt_2, height, width, callback_steps, negative_prompt=None, negative_prompt_2=None, prompt_embeds=None, negative_prompt_embeds=None, pooled_prompt_embeds=None, negative_pooled_prompt_embeds=None, num_images_per_prompt=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt_2 is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)): raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}") if negative_prompt is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) elif negative_prompt_2 is not None and negative_prompt_embeds is not None: raise ValueError( f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:" f" {negative_prompt_embeds}. Please make sure to only forward one of the two." ) if prompt_embeds is not None and negative_prompt_embeds is not None: if prompt_embeds.shape != negative_prompt_embeds.shape: raise ValueError( "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" f" {negative_prompt_embeds.shape}." ) if prompt_embeds is not None and pooled_prompt_embeds is None: raise ValueError( "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`." ) if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None: raise ValueError( "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. 
Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`." ) # DemoFusion specific checks if max(height, width) % 1024 != 0: raise ValueError( f"the larger one of `height` and `width` has to be divisible by 1024 but are {height} and {width}." ) if num_images_per_prompt != 1: warnings.warn("num_images_per_prompt != 1 is not supported by DemoFusion and will be ignored.") num_images_per_prompt = 1 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype): add_time_ids = list(original_size + crops_coords_top_left + target_size) passed_add_embed_dim = ( self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim ) expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
) add_time_ids = torch.tensor([add_time_ids], dtype=dtype) return add_time_ids def get_views(self, height, width, window_size=128, stride=64, random_jitter=False): height //= self.vae_scale_factor width //= self.vae_scale_factor num_blocks_height = int((height - window_size) / stride - 1e-6) + 2 if height > window_size else 1 num_blocks_width = int((width - window_size) / stride - 1e-6) + 2 if width > window_size else 1 total_num_blocks = int(num_blocks_height * num_blocks_width) views = [] for i in range(total_num_blocks): h_start = int((i // num_blocks_width) * stride) h_end = h_start + window_size w_start = int((i % num_blocks_width) * stride) w_end = w_start + window_size if h_end > height: h_start = int(h_start + height - h_end) h_end = int(height) if w_end > width: w_start = int(w_start + width - w_end) w_end = int(width) if h_start < 0: h_end = int(h_end - h_start) h_start = 0 if w_start < 0: w_end = int(w_end - w_start) w_start = 0 if random_jitter: jitter_range = (window_size - stride) // 4 w_jitter = 0 h_jitter = 0 if (w_start != 0) and (w_end != width): w_jitter = random.randint(-jitter_range, jitter_range) elif (w_start == 0) and (w_end != width): w_jitter = random.randint(-jitter_range, 0) elif (w_start != 0) and (w_end == width): w_jitter = random.randint(0, jitter_range) if (h_start != 0) and (h_end != height): h_jitter = random.randint(-jitter_range, jitter_range) elif (h_start == 0) and (h_end != height): h_jitter = random.randint(-jitter_range, 0) elif (h_start != 0) and (h_end == height): h_jitter = random.randint(0, jitter_range) h_start += h_jitter + jitter_range h_end += h_jitter + jitter_range w_start += w_jitter + jitter_range w_end += w_jitter + jitter_range views.append((h_start, h_end, w_start, w_end)) return views def tiled_decode(self, latents, current_height, current_width): core_size = self.unet.config.sample_size // 4 core_stride = core_size pad_size = self.unet.config.sample_size // 4 * 3 decoder_view_batch_size = 1 views = self.get_views(current_height, current_width, stride=core_stride, window_size=core_size) views_batch = [views[i : i + decoder_view_batch_size] for i in range(0, len(views), decoder_view_batch_size)] latents_ = F.pad(latents, (pad_size, pad_size, pad_size, pad_size), "constant", 0) image = torch.zeros(latents.size(0), 3, current_height, current_width).to(latents.device) count = torch.zeros_like(image).to(latents.device) # get the latents corresponding to the current view coordinates with self.progress_bar(total=len(views_batch)) as progress_bar: for j, batch_view in enumerate(views_batch): len(batch_view) latents_for_view = torch.cat( [ latents_[:, :, h_start : h_end + pad_size * 2, w_start : w_end + pad_size * 2] for h_start, h_end, w_start, w_end in batch_view ] ) image_patch = self.vae.decode(latents_for_view / self.vae.config.scaling_factor, return_dict=False)[0] h_start, h_end, w_start, w_end = views[j] h_start, h_end, w_start, w_end = ( h_start * self.vae_scale_factor, h_end * self.vae_scale_factor, w_start * self.vae_scale_factor, w_end * self.vae_scale_factor, ) p_h_start, p_h_end, p_w_start, p_w_end = ( pad_size * self.vae_scale_factor, image_patch.size(2) - pad_size * self.vae_scale_factor, pad_size * self.vae_scale_factor, image_patch.size(3) - pad_size * self.vae_scale_factor, ) image[:, :, h_start:h_end, w_start:w_end] += image_patch[:, :, p_h_start:p_h_end, p_w_start:p_w_end] count[:, :, h_start:h_end, w_start:w_end] += 1 progress_bar.update() image = image / count return image # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale.StableDiffusionUpscalePipeline.upcast_vae def upcast_vae(self): dtype = self.vae.dtype self.vae.to(dtype=torch.float32) use_torch_2_0_or_xformers = isinstance( self.vae.decoder.mid_block.attentions[0].processor, ( AttnProcessor2_0, XFormersAttnProcessor, LoRAXFormersAttnProcessor, LoRAAttnProcessor2_0, ), ) # if xformers or torch_2_0 is used attention block does not need # to be in float32 which can save lots of memory if use_torch_2_0_or_xformers: self.vae.post_quant_conv.to(dtype) self.vae.decoder.conv_in.to(dtype) self.vae.decoder.mid_block.to(dtype) @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, prompt_2: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, denoising_end: Optional[float] = None, guidance_scale: float = 5.0, negative_prompt: Optional[Union[str, List[str]]] = None, negative_prompt_2: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, pooled_prompt_embeds: Optional[torch.FloatTensor] = None, negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = False, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, original_size: Optional[Tuple[int, int]] = None, crops_coords_top_left: Tuple[int, int] = (0, 0), target_size: Optional[Tuple[int, int]] = None, negative_original_size: Optional[Tuple[int, int]] = None, negative_crops_coords_top_left: Tuple[int, int] = (0, 0), negative_target_size: Optional[Tuple[int, int]] = None, ################### DemoFusion specific parameters #################### view_batch_size: int = 16, multi_decoder: bool = True, stride: Optional[int] = 64, cosine_scale_1: Optional[float] = 3.0, cosine_scale_2: Optional[float] = 1.0, cosine_scale_3: Optional[float] = 1.0, sigma: Optional[float] = 0.8, show_image: bool = False, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is used in both text-encoders height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. This is set to 1024 by default for the best results. Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. This is set to 1024 by default for the best results. 
Anything below 512 pixels won't work well for [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) and checkpoints that are not specifically fine-tuned on low resolutions. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. denoising_end (`float`, *optional*): When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be completed before it is intentionally prematurely terminated. As a result, the returned sample will still retain a substantial amount of noise as determined by the discrete timesteps selected by the scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) guidance_scale (`float`, *optional*, defaults to 5.0): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). negative_prompt_2 (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, pooled text embeddings will be generated from `prompt` input argument. negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). guidance_rescale (`float`, *optional*, defaults to 0.7): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. `original_size` defaults to `(width, height)` if not specified. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): For most cases, `target_size` should be set to the desired height and width of the generated image. If not specified it will default to `(width, height)`. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): To negatively condition the generation process based on a specific image resolution. Part of SDXL's micro-conditioning as explained in section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. 
            negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                To negatively condition the generation process based on specific crop coordinates. Part of SDXL's
                micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                To negatively condition the generation process based on a target image resolution. It should be the
                same as `target_size` in most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more
                information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208.
            ################### DemoFusion specific parameters ####################
            view_batch_size (`int`, defaults to 16):
                The batch size for multiple denoising paths. Typically, a larger batch size can result in higher
                efficiency but comes with increased GPU memory requirements.
            multi_decoder (`bool`, defaults to True):
                Determines whether to use a tiled decoder. Generally, when the resolution exceeds 3072x3072, a tiled
                decoder becomes necessary.
            stride (`int`, defaults to 64):
                The stride of moving local patches. A smaller stride is better for alleviating seam issues, but it
                also introduces additional computational overhead and inference time.
            cosine_scale_1 (`float`, defaults to 3):
                Controls the strength of the skip-residual. For specific impacts, please refer to Appendix C in the
                DemoFusion paper.
            cosine_scale_2 (`float`, defaults to 1):
                Controls the strength of dilated sampling. For specific impacts, please refer to Appendix C in the
                DemoFusion paper.
            cosine_scale_3 (`float`, defaults to 1):
                Controls the strength of the Gaussian filter. For specific impacts, please refer to Appendix C in the
                DemoFusion paper.
            sigma (`float`, defaults to 0.8):
                The standard deviation of the Gaussian filter.
            show_image (`bool`, defaults to False):
                Determines whether to show intermediate results during generation.

        Examples:

        Returns:
            a `list` with the generated images at each phase.
        """

        # 0. Default height and width to unet
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        x1_size = self.default_sample_size * self.vae_scale_factor

        height_scale = height / x1_size
        width_scale = width / x1_size
        scale_num = int(max(height_scale, width_scale))
        aspect_ratio = min(height_scale, width_scale) / max(height_scale, width_scale)

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            callback_steps,
            negative_prompt,
            negative_prompt_2,
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
            num_images_per_prompt,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = ( cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None ) ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = self.encode_prompt( prompt=prompt, prompt_2=prompt_2, device=device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=do_classifier_free_guidance, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height // scale_num, width // scale_num, prompt_embeds.dtype, device, generator, latents, ) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. Prepare added time ids & embeddings add_text_embeds = pooled_prompt_embeds add_time_ids = self._get_add_time_ids( original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype ) if negative_original_size is not None and negative_target_size is not None: negative_add_time_ids = self._get_add_time_ids( negative_original_size, negative_crops_coords_top_left, negative_target_size, dtype=prompt_embeds.dtype, ) else: negative_add_time_ids = add_time_ids if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0) add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0) prompt_embeds = prompt_embeds.to(device) add_text_embeds = add_text_embeds.to(device) add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1) # 8. 
Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 7.1 Apply denoising_end if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1: discrete_timestep_cutoff = int( round( self.scheduler.config.num_train_timesteps - (denoising_end * self.scheduler.config.num_train_timesteps) ) ) num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps))) timesteps = timesteps[:num_inference_steps] output_images = [] ############################################################### Phase 1 ################################################################# print("### Phase 1 Denoising ###") with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): latents_for_view = latents # expand the latents if we are doing classifier free guidance latent_model_input = latents.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids} noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: step_idx = i // getattr(self.scheduler, "order", 1) callback(step_idx, t, latents) anchor_mean = latents.mean() anchor_std = latents.std() if not output_type == "latent": # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast if needs_upcasting: self.upcast_vae() latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype) print("### Phase 1 Decoding ###") image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] # cast back to fp16 if needed if needs_upcasting: self.vae.to(dtype=torch.float16) image = self.image_processor.postprocess(image, output_type=output_type) if show_image: plt.figure(figsize=(10, 10)) plt.imshow(image[0]) plt.axis("off") # Turn off axis numbers and ticks plt.show() output_images.append(image[0]) ####################################################### Phase 2+ ##################################################### for current_scale_num in range(2, scale_num + 1): print("### Phase {} Denoising ###".format(current_scale_num)) current_height = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num current_width = self.unet.config.sample_size * self.vae_scale_factor * current_scale_num if height > width: current_width = int(current_width * aspect_ratio) else: current_height = int(current_height * 
aspect_ratio) latents = F.interpolate( latents, size=(int(current_height / self.vae_scale_factor), int(current_width / self.vae_scale_factor)), mode="bicubic", ) noise_latents = [] noise = torch.randn_like(latents) for timestep in timesteps: noise_latent = self.scheduler.add_noise(latents, noise, timestep.unsqueeze(0)) noise_latents.append(noise_latent) latents = noise_latents[0] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): count = torch.zeros_like(latents) value = torch.zeros_like(latents) cosine_factor = ( 0.5 * ( 1 + torch.cos( torch.pi * (self.scheduler.config.num_train_timesteps - t) / self.scheduler.config.num_train_timesteps ) ).cpu() ) c1 = cosine_factor**cosine_scale_1 latents = latents * (1 - c1) + noise_latents[i] * c1 ############################################# MultiDiffusion ############################################# views = self.get_views( current_height, current_width, stride=stride, window_size=self.unet.config.sample_size, random_jitter=True, ) views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] jitter_range = (self.unet.config.sample_size - stride) // 4 latents_ = F.pad(latents, (jitter_range, jitter_range, jitter_range, jitter_range), "constant", 0) count_local = torch.zeros_like(latents_) value_local = torch.zeros_like(latents_) for j, batch_view in enumerate(views_batch): vb_size = len(batch_view) # get the latents corresponding to the current view coordinates latents_for_view = torch.cat( [ latents_[:, :, h_start:h_end, w_start:w_end] for h_start, h_end, w_start, w_end in batch_view ] ) # expand the latents if we are doing classifier free guidance latent_model_input = latents_for_view latent_model_input = ( latent_model_input.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latent_model_input ) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) add_text_embeds_input = torch.cat([add_text_embeds] * vb_size) add_time_ids_input = [] for h_start, h_end, w_start, w_end in batch_view: add_time_ids_ = add_time_ids.clone() add_time_ids_[:, 2] = h_start * self.vae_scale_factor add_time_ids_[:, 3] = w_start * self.vae_scale_factor add_time_ids_input.append(add_time_ids_) add_time_ids_input = torch.cat(add_time_ids_input) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input} noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds_input, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg( noise_pred, noise_pred_text, guidance_rescale=guidance_rescale ) # compute the previous noisy sample x_t -> x_t-1 self.scheduler._init_step_index(t) latents_denoised_batch = self.scheduler.step( noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False )[0] # extract value from batch for latents_view_denoised, (h_start, h_end, w_start, w_end) in zip( latents_denoised_batch.chunk(vb_size), batch_view ): value_local[:, :, h_start:h_end, w_start:w_end] += latents_view_denoised count_local[:, :, h_start:h_end, w_start:w_end] += 1 value_local = value_local[ :, :, jitter_range : jitter_range + current_height // self.vae_scale_factor, jitter_range : jitter_range + current_width // self.vae_scale_factor, ] count_local = count_local[ :, :, jitter_range : jitter_range + current_height // self.vae_scale_factor, jitter_range : jitter_range + current_width // self.vae_scale_factor, ] c2 = cosine_factor**cosine_scale_2 value += value_local / count_local * (1 - c2) count += torch.ones_like(value_local) * (1 - c2) ############################################# Dilated Sampling ############################################# views = [[h, w] for h in range(current_scale_num) for w in range(current_scale_num)] views_batch = [views[i : i + view_batch_size] for i in range(0, len(views), view_batch_size)] h_pad = (current_scale_num - (latents.size(2) % current_scale_num)) % current_scale_num w_pad = (current_scale_num - (latents.size(3) % current_scale_num)) % current_scale_num latents_ = F.pad(latents, (w_pad, 0, h_pad, 0), "constant", 0) count_global = torch.zeros_like(latents_) value_global = torch.zeros_like(latents_) c3 = 0.99 * cosine_factor**cosine_scale_3 + 1e-2 std_, mean_ = latents_.std(), latents_.mean() latents_gaussian = gaussian_filter( latents_, kernel_size=(2 * current_scale_num - 1), sigma=sigma * c3 ) latents_gaussian = ( latents_gaussian - latents_gaussian.mean() ) / latents_gaussian.std() * std_ + mean_ for j, batch_view in enumerate(views_batch): latents_for_view = torch.cat( [latents_[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view] ) latents_for_view_gaussian = torch.cat( [latents_gaussian[:, :, h::current_scale_num, w::current_scale_num] for h, w in batch_view] ) vb_size = latents_for_view.size(0) # expand the latents if we are doing classifier free guidance latent_model_input = latents_for_view_gaussian latent_model_input = ( latent_model_input.repeat_interleave(2, dim=0) if do_classifier_free_guidance else latent_model_input ) latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) prompt_embeds_input = torch.cat([prompt_embeds] * vb_size) add_text_embeds_input = torch.cat([add_text_embeds] * vb_size) add_time_ids_input = torch.cat([add_time_ids] * vb_size) # predict the noise residual added_cond_kwargs = {"text_embeds": add_text_embeds_input, "time_ids": add_time_ids_input} noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds_input, cross_attention_kwargs=cross_attention_kwargs, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0] if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred[::2], noise_pred[1::2] noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. 
                            # in https://arxiv.org/pdf/2305.08891.pdf
                            noise_pred = rescale_noise_cfg(
                                noise_pred, noise_pred_text, guidance_rescale=guidance_rescale
                            )

                        # compute the previous noisy sample x_t -> x_t-1
                        self.scheduler._init_step_index(t)
                        latents_denoised_batch = self.scheduler.step(
                            noise_pred, t, latents_for_view, **extra_step_kwargs, return_dict=False
                        )[0]

                        # extract value from batch
                        for latents_view_denoised, (h, w) in zip(latents_denoised_batch.chunk(vb_size), batch_view):
                            value_global[:, :, h::current_scale_num, w::current_scale_num] += latents_view_denoised
                            count_global[:, :, h::current_scale_num, w::current_scale_num] += 1

                    c2 = cosine_factor**cosine_scale_2
                    value_global = value_global[:, :, h_pad:, w_pad:]
                    value += value_global * c2
                    count += torch.ones_like(value_global) * c2

                    ###########################################################
                    latents = torch.where(count > 0, value / count, value)

                    # call the callback, if provided
                    if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                        progress_bar.update()
                        if callback is not None and i % callback_steps == 0:
                            step_idx = i // getattr(self.scheduler, "order", 1)
                            callback(step_idx, t, latents)

            #########################################################################################################################################
            latents = (latents - latents.mean()) / latents.std() * anchor_std + anchor_mean

            if not output_type == "latent":
                # make sure the VAE is in float32 mode, as it overflows in float16
                needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast
                if needs_upcasting:
                    self.upcast_vae()
                    latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

                print("### Phase {} Decoding ###".format(current_scale_num))
                if multi_decoder:
                    image = self.tiled_decode(latents, current_height, current_width)
                else:
                    image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]

                # cast back to fp16 if needed
                if needs_upcasting:
                    self.vae.to(dtype=torch.float16)
            else:
                image = latents

            if not output_type == "latent":
                image = self.image_processor.postprocess(image, output_type=output_type)
                if show_image:
                    plt.figure(figsize=(10, 10))
                    plt.imshow(image[0])
                    plt.axis("off")  # Turn off axis numbers and ticks
                    plt.show()
                output_images.append(image[0])

        # Offload all models
        self.maybe_free_model_hooks()

        return output_images

    # Override to properly handle the loading and unloading of the additional text encoder.
    def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
        # We could have accessed the unet config from `lora_state_dict()` too. We pass
        # it here explicitly to be able to tell that it's coming from an SDXL
        # pipeline.

        # Remove any existing hooks.
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module
        else:
            raise ImportError("Offloading requires `accelerate v0.17.0` or higher.")

        is_model_cpu_offload = False
        is_sequential_cpu_offload = False
        recursive = False
        for _, component in self.components.items():
            if isinstance(component, torch.nn.Module):
                if hasattr(component, "_hf_hook"):
                    is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
                    is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)

                    logger.info(
                        "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
                    )
                    recursive = is_sequential_cpu_offload
                    remove_hook_from_module(component, recurse=recursive)

        state_dict, network_alphas = self.lora_state_dict(
            pretrained_model_name_or_path_or_dict,
            unet_config=self.unet.config,
            **kwargs,
        )
        self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)

        text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
        if len(text_encoder_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder,
                prefix="text_encoder",
                lora_scale=self.lora_scale,
            )

        text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
        if len(text_encoder_2_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_2_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder_2,
                prefix="text_encoder_2",
                lora_scale=self.lora_scale,
            )

        # Offload back.
        if is_model_cpu_offload:
            self.enable_model_cpu_offload()
        elif is_sequential_cpu_offload:
            self.enable_sequential_cpu_offload()

    @classmethod
    def save_lora_weights(
        self,
        save_directory: Union[str, os.PathLike],
        unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = True,
    ):
        state_dict = {}

        def pack_weights(layers, prefix):
            layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
            layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
            return layers_state_dict

        if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
            raise ValueError(
                "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
            )

        if unet_lora_layers:
            state_dict.update(pack_weights(unet_lora_layers, "unet"))

        if text_encoder_lora_layers and text_encoder_2_lora_layers:
            state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
            state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))

        self.write_lora_layers(
            state_dict=state_dict,
            save_directory=save_directory,
            is_main_process=is_main_process,
            weight_name=weight_name,
            save_function=save_function,
            safe_serialization=safe_serialization,
        )

    def _remove_text_encoder_monkey_patch(self):
        self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
        self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)
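The overridden `load_lora_weights` above removes any accelerate offload hooks, loads the LoRA parameters into the UNet and both SDXL text encoders, and then restores the previous offloading mode. Below is a minimal usage sketch, not taken from this file: the base checkpoint id and the LoRA path are placeholder assumptions, and `accelerate >= 0.17.0` is assumed since the override raises an `ImportError` without it.

```py
import torch
from diffusers import DiffusionPipeline

# Load the community DemoFusion SDXL pipeline (base checkpoint is an assumed example).
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed SDXL base model
    custom_pipeline="pipeline_demofusion_sdxl",
    torch_dtype=torch.float16,
).to("cuda")

# Load SDXL LoRA weights into the UNet and both text encoders (placeholder path or Hub id).
# If model/sequential CPU offload was enabled beforehand, the override temporarily removes
# the hooks and re-enables the same offloading mode afterwards.
pipe.load_lora_weights("path/to/sdxl_lora")
```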
diffusers/examples/community/pipeline_demofusion_sdxl.py/0
{ "file_path": "diffusers/examples/community/pipeline_demofusion_sdxl.py", "repo_id": "diffusers", "token_count": 35212 }
110
import math import tempfile from typing import List, Optional import numpy as np import PIL.Image import torch from accelerate import Accelerator from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel from diffusers.loaders import AttnProcsLayers, LoraLoaderMixin from diffusers.models.attention_processor import ( AttnAddedKVProcessor, AttnAddedKVProcessor2_0, LoRAAttnAddedKVProcessor, LoRAAttnProcessor, LoRAAttnProcessor2_0, SlicedAttnAddedKVProcessor, ) from diffusers.optimization import get_scheduler class SdeDragPipeline(DiffusionPipeline): r""" Pipeline for image drag-and-drop editing using stochastic differential equations: https://arxiv.org/abs/2311.01410. Please refer to the [official repository](https://github.com/ML-GSAI/SDE-Drag) for more information. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Please use [`DDIMScheduler`]. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DPMSolverMultistepScheduler, ): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, prompt: str, image: PIL.Image.Image, mask_image: PIL.Image.Image, source_points: List[List[int]], target_points: List[List[int]], t0: Optional[float] = 0.6, steps: Optional[int] = 200, step_size: Optional[int] = 2, image_scale: Optional[float] = 0.3, adapt_radius: Optional[int] = 5, min_lora_scale: Optional[float] = 0.5, generator: Optional[torch.Generator] = None, ): r""" Function invoked when calling the pipeline for image editing. Args: prompt (`str`, *required*): The prompt to guide the image editing. image (`PIL.Image.Image`, *required*): Which will be edited, parts of the image will be masked out with `mask_image` and edited according to `prompt`. mask_image (`PIL.Image.Image`, *required*): To mask `image`. White pixels in the mask will be edited, while black pixels will be preserved. source_points (`List[List[int]]`, *required*): Used to mark the starting positions of drag editing in the image, with each pixel represented as a `List[int]` of length 2. target_points (`List[List[int]]`, *required*): Used to mark the target positions of drag editing in the image, with each pixel represented as a `List[int]` of length 2. t0 (`float`, *optional*, defaults to 0.6): The time parameter. 
                Higher t0 improves the fidelity while lowering the faithfulness of the edited images, and vice versa.
            steps (`int`, *optional*, defaults to 200):
                The number of sampling iterations.
            step_size (`int`, *optional*, defaults to 2):
                The drag distance of each drag step.
            image_scale (`float`, *optional*, defaults to 0.3):
                To avoid duplicating the content, use image_scale to perturb the source.
            adapt_radius (`int`, *optional*, defaults to 5):
                The size of the region for copy and paste operations during each step of the drag process.
            min_lora_scale (`float`, *optional*, defaults to 0.5):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
                min_lora_scale specifies the minimum LoRA scale during the image drag-editing process.
            generator (`torch.Generator`, *optional*, defaults to None):
                To make generation deterministic (https://pytorch.org/docs/stable/generated/torch.Generator.html).

        Examples:

        ```py
        >>> import PIL
        >>> import torch
        >>> from diffusers import DDIMScheduler, DiffusionPipeline

        >>> # Load the pipeline
        >>> model_path = "runwayml/stable-diffusion-v1-5"
        >>> scheduler = DDIMScheduler.from_pretrained(model_path, subfolder="scheduler")
        >>> pipe = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, custom_pipeline="sde_drag")
        >>> pipe.to('cuda')

        >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
        >>> # If not training LoRA, please avoid using torch.float16
        >>> # pipe.to(torch.float16)

        >>> # Provide prompt, image, mask image, and the starting and target points for drag editing.
        >>> prompt = "prompt of the image"
        >>> image = PIL.Image.open('/path/to/image')
        >>> mask_image = PIL.Image.open('/path/to/mask_image')
        >>> source_points = [[123, 456]]
        >>> target_points = [[234, 567]]

        >>> # train_lora is optional; in most cases, using train_lora better preserves consistency with the original image.
>>> pipe.train_lora(prompt, image) >>> output = pipe(prompt, image, mask_image, source_points, target_points) >>> output_image = PIL.Image.fromarray(output) >>> output_image.save("./output.png") ``` """ self.scheduler.set_timesteps(steps) noise_scale = (1 - image_scale**2) ** (0.5) text_embeddings = self._get_text_embed(prompt) uncond_embeddings = self._get_text_embed([""]) text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) latent = self._get_img_latent(image) mask = mask_image.resize((latent.shape[3], latent.shape[2])) mask = torch.tensor(np.array(mask)) mask = mask.unsqueeze(0).expand_as(latent).to(self.device) source_points = torch.tensor(source_points).div(torch.tensor([8]), rounding_mode="trunc") target_points = torch.tensor(target_points).div(torch.tensor([8]), rounding_mode="trunc") distance = target_points - source_points distance_norm_max = torch.norm(distance.float(), dim=1, keepdim=True).max() if distance_norm_max <= step_size: drag_num = 1 else: drag_num = distance_norm_max.div(torch.tensor([step_size]), rounding_mode="trunc") if (distance_norm_max / drag_num - step_size).abs() > ( distance_norm_max / (drag_num + 1) - step_size ).abs(): drag_num += 1 latents = [] for i in tqdm(range(int(drag_num)), desc="SDE Drag"): source_new = source_points + (i / drag_num * distance).to(torch.int) target_new = source_points + ((i + 1) / drag_num * distance).to(torch.int) latent, noises, hook_latents, lora_scales, cfg_scales = self._forward( latent, steps, t0, min_lora_scale, text_embeddings, generator ) latent = self._copy_and_paste( latent, source_new, target_new, adapt_radius, latent.shape[2] - 1, latent.shape[3] - 1, image_scale, noise_scale, generator, ) latent = self._backward( latent, mask, steps, t0, noises, hook_latents, lora_scales, cfg_scales, text_embeddings, generator ) latents.append(latent) result_image = 1 / 0.18215 * latents[-1] with torch.no_grad(): result_image = self.vae.decode(result_image).sample result_image = (result_image / 2 + 0.5).clamp(0, 1) result_image = result_image.cpu().permute(0, 2, 3, 1).numpy()[0] result_image = (result_image * 255).astype(np.uint8) return result_image def train_lora(self, prompt, image, lora_step=100, lora_rank=16, generator=None): accelerator = Accelerator(gradient_accumulation_steps=1, mixed_precision="fp16") self.vae.requires_grad_(False) self.text_encoder.requires_grad_(False) self.unet.requires_grad_(False) unet_lora_attn_procs = {} for name, attn_processor in self.unet.attn_processors.items(): cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = self.unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = self.unet.config.block_out_channels[block_id] else: raise NotImplementedError("name must start with up_blocks, mid_blocks, or down_blocks") if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)): lora_attn_processor_class = LoRAAttnAddedKVProcessor else: lora_attn_processor_class = ( LoRAAttnProcessor2_0 if hasattr(torch.nn.functional, "scaled_dot_product_attention") else LoRAAttnProcessor ) unet_lora_attn_procs[name] = lora_attn_processor_class( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank ) 
self.unet.set_attn_processor(unet_lora_attn_procs) unet_lora_layers = AttnProcsLayers(self.unet.attn_processors) params_to_optimize = unet_lora_layers.parameters() optimizer = torch.optim.AdamW( params_to_optimize, lr=2e-4, betas=(0.9, 0.999), weight_decay=1e-2, eps=1e-08, ) lr_scheduler = get_scheduler( "constant", optimizer=optimizer, num_warmup_steps=0, num_training_steps=lora_step, num_cycles=1, power=1.0, ) unet_lora_layers = accelerator.prepare_model(unet_lora_layers) optimizer = accelerator.prepare_optimizer(optimizer) lr_scheduler = accelerator.prepare_scheduler(lr_scheduler) with torch.no_grad(): text_inputs = self._tokenize_prompt(prompt, tokenizer_max_length=None) text_embedding = self._encode_prompt( text_inputs.input_ids, text_inputs.attention_mask, text_encoder_use_attention_mask=False ) image_transforms = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) image = image_transforms(image).to(self.device, dtype=self.vae.dtype) image = image.unsqueeze(dim=0) latents_dist = self.vae.encode(image).latent_dist for _ in tqdm(range(lora_step), desc="Train LoRA"): self.unet.train() model_input = latents_dist.sample() * self.vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn( model_input.size(), dtype=model_input.dtype, layout=model_input.layout, device=model_input.device, generator=generator, ) bsz, channels, height, width = model_input.shape # Sample a random timestep for each image timesteps = torch.randint( 0, self.scheduler.config.num_train_timesteps, (bsz,), device=model_input.device, generator=generator ) timesteps = timesteps.long() # Add noise to the model input according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_model_input = self.scheduler.add_noise(model_input, noise, timesteps) # Predict the noise residual model_pred = self.unet(noisy_model_input, timesteps, text_embedding).sample # Get the target for loss depending on the prediction type if self.scheduler.config.prediction_type == "epsilon": target = noise elif self.scheduler.config.prediction_type == "v_prediction": target = self.scheduler.get_velocity(model_input, noise, timesteps) else: raise ValueError(f"Unknown prediction type {self.scheduler.config.prediction_type}") loss = torch.nn.functional.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() with tempfile.TemporaryDirectory() as save_lora_dir: LoraLoaderMixin.save_lora_weights( save_directory=save_lora_dir, unet_lora_layers=unet_lora_layers, text_encoder_lora_layers=None, ) self.unet.load_attn_procs(save_lora_dir) def _tokenize_prompt(self, prompt, tokenizer_max_length=None): if tokenizer_max_length is not None: max_length = tokenizer_max_length else: max_length = self.tokenizer.model_max_length text_inputs = self.tokenizer( prompt, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt", ) return text_inputs def _encode_prompt(self, input_ids, attention_mask, text_encoder_use_attention_mask=False): text_input_ids = input_ids.to(self.device) if text_encoder_use_attention_mask: attention_mask = attention_mask.to(self.device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids, attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] return prompt_embeds @torch.no_grad() def _get_text_embed(self, prompt): text_input = self.tokenizer( prompt, padding="max_length", 
max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] return text_embeddings def _copy_and_paste( self, latent, source_new, target_new, adapt_radius, max_height, max_width, image_scale, noise_scale, generator ): def adaption_r(source, target, adapt_radius, max_height, max_width): r_x_lower = min(adapt_radius, source[0], target[0]) r_x_upper = min(adapt_radius, max_width - source[0], max_width - target[0]) r_y_lower = min(adapt_radius, source[1], target[1]) r_y_upper = min(adapt_radius, max_height - source[1], max_height - target[1]) return r_x_lower, r_x_upper, r_y_lower, r_y_upper for source_, target_ in zip(source_new, target_new): r_x_lower, r_x_upper, r_y_lower, r_y_upper = adaption_r( source_, target_, adapt_radius, max_height, max_width ) source_feature = latent[ :, :, source_[1] - r_y_lower : source_[1] + r_y_upper, source_[0] - r_x_lower : source_[0] + r_x_upper ].clone() latent[ :, :, source_[1] - r_y_lower : source_[1] + r_y_upper, source_[0] - r_x_lower : source_[0] + r_x_upper ] = image_scale * source_feature + noise_scale * torch.randn( latent.shape[0], 4, r_y_lower + r_y_upper, r_x_lower + r_x_upper, device=self.device, generator=generator, ) latent[ :, :, target_[1] - r_y_lower : target_[1] + r_y_upper, target_[0] - r_x_lower : target_[0] + r_x_upper ] = source_feature * 1.1 return latent @torch.no_grad() def _get_img_latent(self, image, height=None, weight=None): data = image.convert("RGB") if height is not None: data = data.resize((weight, height)) transform = transforms.ToTensor() data = transform(data).unsqueeze(0) data = (data * 2.0) - 1.0 data = data.to(self.device, dtype=self.vae.dtype) latent = self.vae.encode(data).latent_dist.sample() latent = 0.18215 * latent return latent @torch.no_grad() def _get_eps(self, latent, timestep, guidance_scale, text_embeddings, lora_scale=None): latent_model_input = torch.cat([latent] * 2) if guidance_scale > 1.0 else latent text_embeddings = text_embeddings if guidance_scale > 1.0 else text_embeddings.chunk(2)[1] cross_attention_kwargs = None if lora_scale is None else {"scale": lora_scale} with torch.no_grad(): noise_pred = self.unet( latent_model_input, timestep, encoder_hidden_states=text_embeddings, cross_attention_kwargs=cross_attention_kwargs, ).sample if guidance_scale > 1.0: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) elif guidance_scale == 1.0: noise_pred_text = noise_pred noise_pred_uncond = 0.0 else: raise NotImplementedError(guidance_scale) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred def _forward_sde( self, timestep, sample, guidance_scale, text_embeddings, steps, eta=1.0, lora_scale=None, generator=None ): num_train_timesteps = len(self.scheduler) alphas_cumprod = self.scheduler.alphas_cumprod initial_alpha_cumprod = torch.tensor(1.0) prev_timestep = timestep + num_train_timesteps // steps alpha_prod_t = alphas_cumprod[timestep] if timestep >= 0 else initial_alpha_cumprod alpha_prod_t_prev = alphas_cumprod[prev_timestep] beta_prod_t_prev = 1 - alpha_prod_t_prev x_prev = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) * sample + (1 - alpha_prod_t_prev / alpha_prod_t) ** ( 0.5 ) * torch.randn( sample.size(), dtype=sample.dtype, layout=sample.layout, device=self.device, generator=generator ) eps = self._get_eps(x_prev, prev_timestep, guidance_scale, text_embeddings, lora_scale) sigma_t_prev = ( eta * (1 - alpha_prod_t) ** (0.5) * (1 - 
alpha_prod_t_prev / (1 - alpha_prod_t_prev) * (1 - alpha_prod_t) / alpha_prod_t) ** (0.5) ) pred_original_sample = (x_prev - beta_prod_t_prev ** (0.5) * eps) / alpha_prod_t_prev ** (0.5) pred_sample_direction_coeff = (1 - alpha_prod_t - sigma_t_prev**2) ** (0.5) noise = ( sample - alpha_prod_t ** (0.5) * pred_original_sample - pred_sample_direction_coeff * eps ) / sigma_t_prev return x_prev, noise def _sample( self, timestep, sample, guidance_scale, text_embeddings, steps, sde=False, noise=None, eta=1.0, lora_scale=None, generator=None, ): num_train_timesteps = len(self.scheduler) alphas_cumprod = self.scheduler.alphas_cumprod final_alpha_cumprod = torch.tensor(1.0) eps = self._get_eps(sample, timestep, guidance_scale, text_embeddings, lora_scale) prev_timestep = timestep - num_train_timesteps // steps alpha_prod_t = alphas_cumprod[timestep] alpha_prod_t_prev = alphas_cumprod[prev_timestep] if prev_timestep >= 0 else final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t sigma_t = ( eta * ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) ** (0.5) * (1 - alpha_prod_t / alpha_prod_t_prev) ** (0.5) if sde else 0 ) pred_original_sample = (sample - beta_prod_t ** (0.5) * eps) / alpha_prod_t ** (0.5) pred_sample_direction_coeff = (1 - alpha_prod_t_prev - sigma_t**2) ** (0.5) noise = ( torch.randn( sample.size(), dtype=sample.dtype, layout=sample.layout, device=self.device, generator=generator ) if noise is None else noise ) latent = ( alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction_coeff * eps + sigma_t * noise ) return latent def _forward(self, latent, steps, t0, lora_scale_min, text_embeddings, generator): def scale_schedule(begin, end, n, length, type="linear"): if type == "constant": return end elif type == "linear": return begin + (end - begin) * n / length elif type == "cos": factor = (1 - math.cos(n * math.pi / length)) / 2 return (1 - factor) * begin + factor * end else: raise NotImplementedError(type) noises = [] latents = [] lora_scales = [] cfg_scales = [] latents.append(latent) t0 = int(t0 * steps) t_begin = steps - t0 length = len(self.scheduler.timesteps[t_begin - 1 : -1]) - 1 index = 1 for t in self.scheduler.timesteps[t_begin:].flip(dims=[0]): lora_scale = scale_schedule(1, lora_scale_min, index, length, type="cos") cfg_scale = scale_schedule(1, 3.0, index, length, type="linear") latent, noise = self._forward_sde( t, latent, cfg_scale, text_embeddings, steps, lora_scale=lora_scale, generator=generator ) noises.append(noise) latents.append(latent) lora_scales.append(lora_scale) cfg_scales.append(cfg_scale) index += 1 return latent, noises, latents, lora_scales, cfg_scales def _backward( self, latent, mask, steps, t0, noises, hook_latents, lora_scales, cfg_scales, text_embeddings, generator ): t0 = int(t0 * steps) t_begin = steps - t0 hook_latent = hook_latents.pop() latent = torch.where(mask > 128, latent, hook_latent) for t in self.scheduler.timesteps[t_begin - 1 : -1]: latent = self._sample( t, latent, cfg_scales.pop(), text_embeddings, steps, sde=True, noise=noises.pop(), lora_scale=lora_scales.pop(), generator=generator, ) hook_latent = hook_latents.pop() latent = torch.where(mask > 128, latent, hook_latent) return latent
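For reference, the cosine and linear schedules used inside `_forward` can be inspected in isolation. The snippet below re-implements the `scale_schedule` helper from the code above and prints how the LoRA scale decays while the classifier-free-guidance scale ramps up; the endpoint values 0.5 and 3.0 mirror the defaults in this file, while the 10-step window is an arbitrary choice for illustration.

```py
import math

# Re-implementation of the helper defined inside `_forward`, for standalone inspection.
def scale_schedule(begin, end, n, length, type="linear"):
    if type == "constant":
        return end
    elif type == "linear":
        return begin + (end - begin) * n / length
    elif type == "cos":
        # factor ramps smoothly from 0 to 1 as n goes from 0 to length
        factor = (1 - math.cos(n * math.pi / length)) / 2
        return (1 - factor) * begin + factor * end
    else:
        raise NotImplementedError(type)

length = 10  # hypothetical number of forward SDE steps
for n in range(1, length + 1):
    lora_scale = scale_schedule(1, 0.5, n, length, type="cos")  # decays 1.0 -> min_lora_scale
    cfg_scale = scale_schedule(1, 3.0, n, length, type="linear")  # ramps 1.0 -> 3.0
    print(f"step {n:2d}: lora_scale={lora_scale:.3f}  cfg_scale={cfg_scale:.3f}")
```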
diffusers/examples/community/sde_drag.py/0
{ "file_path": "diffusers/examples/community/sde_drag.py", "repo_id": "diffusers", "token_count": 11664 }
111