eval_epoch_loss = eval_loss / len(eval_dataloader)
eval_ppl = torch.exp(eval_epoch_loss)
train_epoch_loss = total_loss / len(train_dataloader)
train_ppl = torch.exp(train_epoch_loss)
print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
Let’s see how well the model performs on the validation set:
Copied
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
    if pred.strip() == true.strip():
        correct += 1
    total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")
"accuracy=97.3568281938326 % on the evaluation dataset"
"eval_preds[:10]=['neutral', 'positive', 'neutral', 'positive', 'neutral', 'negative', 'negative', 'neutral', 'neutral', 'neutral']"
"dataset['validation']['text_label'][:10]=['neutral', 'positive', 'neutral', 'positive', 'neutral', 'negative', 'negative', 'neutral', 'neutral', 'neutral']"
97% accuracy in just a few minutes; pretty good!
Share model
You can store and share your model on the Hub if you’d like. Login to your Hugging Face account and enter your token when prompted:
Copied
from huggingface_hub import notebook_login
notebook_login()
Upload the model to a specific model repository on the Hub with the push_to_hub function:
Copied
peft_model_id = "your-name/t5-large_PREFIX_TUNING_SEQ2SEQ"
model.push_to_hub("your-name/t5-large_PREFIX_TUNING_SEQ2SEQ", use_auth_token=True)
If you check the model file size in the repository, you’ll see that it is only 3.93MB! 🤏
Inference
Once the model has been uploaded to the Hub, anyone can easily use it for inference. Load the configuration and model:
Copied
from peft import PeftModel, PeftConfig
peft_model_id = "stevhliu/t5-large_PREFIX_TUNING_SEQ2SEQ"
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
Get and tokenize some text about financial news:
Copied
inputs = tokenizer(
"The Lithuanian beer market made up 14.41 million liters in January , a rise of 0.8 percent from the year-earlier figure , the Lithuanian Brewers ' Association reporting citing the results from its members .",
return_tensors="pt",
)
Put the model on a GPU and generate the predicted text sentiment:
Copied
model.to(device)
with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
["positive"]
Tuners
Each tuner (or PEFT method) has a configuration and model.
LoRA
For finetuning a model with LoRA.
class peft.LoraConfig
<
source
>
(
peft_type: typing.Union[str, peft.utils.config.PeftType] = None
auto_mapping: typing.Optional[dict] = None
base_model_name_or_path: str = None
revision: str = None
task_type: typing.Union[str, peft.utils.config.TaskType] = None
inference_mode: bool = False
r: int = 8
target_modules: typing.Union[typing.List[str], str, NoneType] = None
lora_alpha: int = 8
lora_dropout: float = 0.0
fan_in_fan_out: bool = False
bias: str = 'none'
modules_to_save: typing.Optional[typing.List[str]] = None
init_lora_weights: bool = True
layers_to_transform: typing.Union[typing.List, int, NoneType] = None
layers_pattern: typing.Optional[str] = None
)
Parameters
r (int) — Lora attention dimension.
target_modules (Union[List[str],str]) — The names of the modules to apply Lora to.
lora_alpha (int) — The alpha parameter for Lora scaling.
lora_dropout (float) — The dropout probability for Lora layers.
fan_in_fan_out (bool) — Set this to True if the layer to replace stores weight like (fan_in, fan_out).
For example, gpt-2 uses Conv1D which stores weights like (fan_in, fan_out) and hence this should be set to True. —
bias (str) — Bias type for Lora. Can be ‘none’, ‘all’ or ‘lora_only’. If ‘all’ or ‘lora_only’, the
corresponding biases will be updated during training. Be aware that this means that, even when disabling
the adapters, the model will not produce the same output as the base model would have without adaptation.
modules_to_save (List[str]) — List of modules apart from LoRA layers to be set as trainable
and saved in the final checkpoint.
layers_to_transform (Union[List[int],int]) —
The layer indexes to transform, if this argument is specified, it will apply the LoRA transformations on
the layer indexes that are specified in this list. If a single integer is passed, it will apply the LoRA
transformations on the layer at this index.
layers_pattern (str) —
The layer pattern name, used only if layers_to_transform is different from None and if the layer
pattern is not in the common layers pattern.
This is the configuration class to store the configuration of a LoraModel.
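In practice, a LoraConfig is usually passed to get_peft_model together with a base model rather than used to build a LoraModel by hand. Below is a minimal sketch of that pattern; the base checkpoint and target module names are illustrative assumptions, not part of this reference:
Copied
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Assumed base checkpoint; "q_proj"/"v_proj" are the attention projections in OPT models.
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
peft_model = get_peft_model(base_model, config)
peft_model.print_trainable_parameters()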
class peft.LoraModel
<
source
>
(
model
config
adapter_name
)
→
torch.nn.Module
Parameters
model (PreTrainedModel) — The model to be adapted.
config (LoraConfig) — The configuration of the Lora model.
Returns
torch.nn.Module
The Lora model.
Creates a Low Rank Adapter (LoRA) model from a pretrained transformers model.
Example:
Copied
>>> from transformers import AutoModelForSeq2SeqLM
>>> from peft import LoraModel, LoraConfig
>>> config = LoraConfig(
... task_type="SEQ_2_SEQ_LM",
... r=8,
... lora_alpha=32,
... target_modules=["q", "v"],
... lora_dropout=0.01,
... )
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> lora_model = LoraModel(model, config, "default")
Copied
>>> import transformers
>>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_int8_training
>>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"]
>>> config = LoraConfig(
... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM"
... )
>>> model = transformers.GPTJForCausalLM.from_pretrained(
... "kakaobrain/kogpt",
... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b
... pad_token_id=tokenizer.eos_token_id,
... use_cache=False,
... device_map={"": rank},
... torch_dtype=torch.float16,
... load_in_8bit=True,
... )
>>> model = prepare_model_for_int8_training(model)
>>> lora_model = get_peft_model(model, config)
Attributes:
model (PreTrainedModel) — The model to be adapted.
peft_config (LoraConfig): The configuration of the Lora model.
add_weighted_adapter
<
source
>
(
adapters
weights
adapter_name
combination_type = 'svd'
)
Parameters
adapters (list) — List of adapter names to be merged.
weights (list) — List of weights for each adapter.
adapter_name (str) — Name of the new adapter.
combination_type (str) — Type of merging. Can be one of [svd, linear]
This method adds a new adapter by merging the given adapters with the given weights.
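As a rough usage sketch following the signature above (the base checkpoint, adapter names, and weights below are made up for illustration):
Copied
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # assumed small base model
config = LoraConfig(r=8, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")

# Two adapters that would normally come from separate fine-tuning runs.
model = get_peft_model(base, config, adapter_name="adapter_a")
model.add_adapter("adapter_b", config)

# Blend them into a new adapter and make it the active one.
model.add_weighted_adapter(
    adapters=["adapter_a", "adapter_b"],
    weights=[0.7, 0.3],
    adapter_name="blend",
    combination_type="svd",
)
model.set_adapter("blend")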
delete_adapter
<
source
>
(
adapter_name
)
Parameters
adapter_name (str) — Name of the adapter to be deleted.
Deletes an existing adapter.
merge_adapter
<
source
>
(
)
This method merges the LoRA layers into the base model.
merge_and_unload
<
source
>
(
)
This method merges the LoRA layers into the base model. This is needed if someone wants to use the base model
as a standalone model.
Example:
Copied
>>> from transformers import AutoModelForCausalLM
>>> from peft import PeftModel
>>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
>>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
>>> model = PeftModel.from_pretrained(base_model, peft_model_id)
>>> merged_model = model.merge_and_unload()
unload
<
source
>
(
)
Gets back the base model by removing all the lora modules without merging. This gives back the original base
model.
unmerge_adapter
<
source
>
(
)
This method unmerges the LoRA layers from the base model.
class peft.tuners.lora.LoraLayer
<
source
>
(
in_features: int
out_features: int
**kwargs
)
class peft.tuners.lora.Linear
<
source
>
(
adapter_name: str
in_features: int
out_features: int
r: int = 0
lora_alpha: int = 1
lora_dropout: float = 0.0
fan_in_fan_out: bool = False
is_target_conv_1d_layer: bool = False
**kwargs
)
P-tuning
class peft.PromptEncoderConfig
<
source
>
(
peft_type: typing.Union[str, peft.utils.config.PeftType] = None
auto_mapping: typing.Optional[dict] = None
base_model_name_or_path: str = None
revision: str = None
task_type: typing.Union[str, peft.utils.config.TaskType] = None
inference_mode: bool = False
num_virtual_tokens: int = None
token_dim: int = None
num_transformer_submodules: typing.Optional[int] = None
num_attention_heads: typing.Optional[int] = None
num_layers: typing.Optional[int] = None
encoder_reparameterization_type: typing.Union[str, peft.tuners.p_tuning.PromptEncoderReparameterizationType] = <PromptEncoderReparameterizationType.MLP: 'MLP'>
encoder_hidden_size: int = None
encoder_num_layers: int = 2
encoder_dropout: float = 0.0
)
Parameters
encoder_reparameterization_type (Union[PromptEncoderReparameterizationType, str]) —
The type of reparameterization to use.
encoder_hidden_size (int) — The hidden size of the prompt encoder.
encoder_num_layers (int) — The number of layers of the prompt encoder.
encoder_dropout (float) — The dropout probability of the prompt encoder.
This is the configuration class to store the configuration of a PromptEncoder.
class peft.PromptEncoder
<
source
>
(
config
)
Parameters
config (PromptEncoderConfig) — The configuration of the prompt encoder.
The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.
Example:
Copied
>>> from peft import PromptEncoder, PromptEncoderConfig
>>> config = PromptEncoderConfig(
... peft_type="P_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... encoder_reparameterization_type="MLP",
... encoder_hidden_size=768,
... )
>>> prompt_encoder = PromptEncoder(config)
Attributes:
embedding (torch.nn.Embedding) — The embedding layer of the prompt encoder.
mlp_head (torch.nn.Sequential) — The MLP head of the prompt encoder if inference_mode=False.
lstm_head (torch.nn.LSTM) — The LSTM head of the prompt encoder if inference_mode=False and
encoder_reparameterization_type="LSTM".
token_dim (int) — The hidden embedding dimension of the base transformer model.
input_size (int) — The input size of the prompt encoder.
output_size (int) — The output size of the prompt encoder.
hidden_size (int) — The hidden size of the prompt encoder.
total_virtual_tokens (int): The total number of virtual tokens of the
prompt encoder.
encoder_type (Union[PromptEncoderReparameterizationType, str]): The encoder type of the prompt
encoder.
Input shape: (batch_size, total_virtual_tokens)
Output shape: (batch_size, total_virtual_tokens, token_dim)
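Most users do not instantiate the PromptEncoder directly; it is created for you when a PromptEncoderConfig is passed to get_peft_model. A minimal sketch, where the base checkpoint and encoder hidden size are illustrative assumptions:
Copied
from transformers import AutoModelForSeq2SeqLM
from peft import PromptEncoderConfig, get_peft_model

config = PromptEncoderConfig(
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=20,
    encoder_hidden_size=128,  # token_dim, num_layers, etc. are inferred from the base model
)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
model = get_peft_model(model, config)
model.print_trainable_parameters()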
Prefix tuning
class peft.PrefixTuningConfig
<
source
>
(
peft_type: typing.Union[str, peft.utils.config.PeftType] = None
auto_mapping: typing.Optional[dict] = None
base_model_name_or_path: str = None
revision: str = None
task_type: typing.Union[str, peft.utils.config.TaskType] = None
inference_mode: bool = False
num_virtual_tokens: int = None
token_dim: int = None
num_transformer_submodules: typing.Optional[int] = None
num_attention_heads: typing.Optional[int] = None
num_layers: typing.Optional[int] = None
encoder_hidden_size: int = None
prefix_projection: bool = False
)
Parameters
encoder_hidden_size (int) — The hidden size of the prompt encoder.
prefix_projection (bool) — Whether to project the prefix embeddings.
This is the configuration class to store the configuration of a PrefixEncoder.
class peft.PrefixEncoder
<
source
>
(
config
)
Parameters
config (PrefixTuningConfig) — The configuration of the prefix encoder.
The torch.nn model to encode the prefix.
Example:
Copied
>>> from peft import PrefixEncoder, PrefixTuningConfig
>>> config = PrefixTuningConfig(
... peft_type="PREFIX_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... encoder_hidden_size=768,
... )
>>> prefix_encoder = PrefixEncoder(config)
Attributes:
embedding (torch.nn.Embedding) — The embedding layer of the prefix encoder.
transform (torch.nn.Sequential) — The two-layer MLP to transform the prefix embeddings if
prefix_projection is True.
prefix_projection (bool) — Whether to project the prefix embeddings.
Input shape: (batch_size, num_virtual_tokens)
Output shape: (batch_size, num_virtual_tokens, 2*layers*hidden)
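As with the prompt encoder, the PrefixEncoder is normally created internally when a PrefixTuningConfig is passed to get_peft_model. A minimal sketch (the base checkpoint is an illustrative assumption):
Copied
from transformers import AutoModelForSeq2SeqLM
from peft import PrefixTuningConfig, get_peft_model

config = PrefixTuningConfig(task_type="SEQ_2_SEQ_LM", num_virtual_tokens=20)
model = AutoModelForSeq2SeqLM.from_pretrained("t5-large")
model = get_peft_model(model, config)
model.print_trainable_parameters()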
Prompt tuning
class peft.PromptTuningConfig
<
source
>
(
peft_type: typing.Union[str, peft.utils.config.PeftType] = None
auto_mapping: typing.Optional[dict] = None
base_model_name_or_path: str = None
revision: str = None
task_type: typing.Union[str, peft.utils.config.TaskType] = None
inference_mode: bool = False
num_virtual_tokens: int = None
token_dim: int = None
num_transformer_submodules: typing.Optional[int] = None
num_attention_heads: typing.Optional[int] = None
num_layers: typing.Optional[int] = None
prompt_tuning_init: typing.Union[peft.tuners.prompt_tuning.PromptTuningInit, str] = <PromptTuningInit.RANDOM: 'RANDOM'>
prompt_tuning_init_text: typing.Optional[str] = None
tokenizer_name_or_path: typing.Optional[str] = None
)
Parameters
prompt_tuning_init (Union[PromptTuningInit, str]) — The initialization of the prompt embedding.
prompt_tuning_init_text (str, optional) —
The text to initialize the prompt embedding. Only used if prompt_tuning_init is TEXT.
tokenizer_name_or_path (str, optional) —
The name or path of the tokenizer. Only used if prompt_tuning_init is TEXT.
This is the configuration class to store the configuration of a PromptEmbedding.
class peft.PromptEmbedding
<
source
>
(
config
word_embeddings
)
Parameters
config (PromptTuningConfig) — The configuration of the prompt embedding.
word_embeddings (torch.nn.Module) — The word embeddings of the base transformer model.
The model to encode virtual tokens into prompt embeddings.
Attributes:
embedding (torch.nn.Embedding) — The embedding layer of the prompt embedding.
Example:
Copied
>>> from peft import PromptEmbedding, PromptTuningConfig
>>> config = PromptTuningConfig(
... peft_type="PROMPT_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... prompt_tuning_init="TEXT",
... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral",
... tokenizer_name_or_path="t5-base",
... )
>>> # t5_model.shared is the word embeddings of the base model
>>> prompt_embedding = PromptEmbedding(config, t5_model.shared)
Input Shape: (batch_size, total_virtual_tokens)
Output Shape: (batch_size, total_virtual_tokens, token_dim)
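The same applies to prompt tuning: passing a PromptTuningConfig to get_peft_model builds the PromptEmbedding for you. A minimal sketch with text initialization; the base checkpoint and init text are illustrative assumptions:
Copied
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, PromptTuningInit, get_peft_model

model_name = "bigscience/bloomz-560m"
config = PromptTuningConfig(
    task_type="CAUSAL_LM",
    prompt_tuning_init=PromptTuningInit.TEXT,
    prompt_tuning_init_text="Classify if the tweet is a complaint or not:",
    num_virtual_tokens=8,
    tokenizer_name_or_path=model_name,  # used to tokenize the init text
)
model = AutoModelForCausalLM.from_pretrained(model_name)
model = get_peft_model(model, config)
model.print_trainable_parameters()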
IA3
class peft.IA3Config
<
source
>
(
peft_type: typing.Union[str, peft.utils.config.PeftType] = None
auto_mapping: typing.Optional[dict] = None
base_model_name_or_path: str = None
revision: str = None
task_type: typing.Union[str, peft.utils.config.TaskType] = None
inference_mode: bool = False
target_modules: typing.Union[typing.List[str], str, NoneType] = None
feedforward_modules: typing.Union[typing.List[str], str, NoneType] = None
fan_in_fan_out: bool = False
modules_to_save: typing.Optional[typing.List[str]] = None
init_ia3_weights: bool = True
)
Parameters
target_modules (Union[List[str],str]) — The names of the modules to apply (IA)^3 to.
feedforward_modules (Union[List[str],str]) — The names of the modules to be treated as feedforward modules
as in the original paper. —
fan_in_fan_out (bool) — Set this to True if the layer to replace stores weight like (fan_in, fan_out).
For example, gpt-2 uses Conv1D which stores weights like (fan_in, fan_out) and hence this should be set to True. —
modules_to_save (List[str]) — List of modules apart from (IA)^3 layers to be set as trainable
and saved in the final checkpoint.
init_ia3_weights (bool) — Whether to initialize the vectors in the (IA)^3 layers, defaults to True.
This is the configuration class to store the configuration of an IA3Model.
class peft.IA3Model
<
source
>
(
model
config
adapter_name
)
→
torch.nn.Module
Parameters
model (PreTrainedModel) — The model to be adapted.
config (IA3Config) — The configuration of the (IA)^3 model.
Returns
torch.nn.Module
The (IA)^3 model.
Creates an Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained
transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638
Example:
Copied
>>> from transformers import AutoModelForSeq2SeqLM
>>> from peft import IA3Model, IA3Config
>>> config = IA3Config(
... peft_type="IA3",
... task_type="SEQ_2_SEQ_LM",
... target_modules=["k", "v", "w0"],
... feedforward_modules=["w0"],
... )
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
>>> ia3_model = IA3Model(model, config, "default")
Attributes:
model (PreTrainedModel) — The model to be adapted.
peft_config (IA3Config): The configuration of the (IA)^3 model.
merge_and_unload
<
source
>
(
)
This method merges the (IA)^3 layers into the base model. This is needed if someone wants to use the base model
as a standalone model.
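No example is shown here, but the flow mirrors the LoRA merge_and_unload example above. A hedged sketch, where the adapter repository name is a made-up placeholder:
Copied
from transformers import AutoModelForSeq2SeqLM
from peft import PeftModel

base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
# Placeholder: an (IA)^3 adapter previously trained and saved with save_pretrained/push_to_hub.
model = PeftModel.from_pretrained(base_model, "your-name/t5-base-ia3-adapter")
merged_model = model.merge_and_unload()  # plain transformers model with the (IA)^3 vectors folded in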
Working with custom models
Some fine-tuning techniques, such as prompt tuning, are specific to language models. That means in 🤗 PEFT, it is
assumed a 🤗 Transformers model is being used. However, other fine-tuning techniques - like
LoRA - are not restricted to specific model types.
In this guide, we will see how LoRA can be applied to a multilayer perceptron and a computer vision model from the timm library.
Multilayer perceptron
Let’s assume that we want to fine-tune a multilayer perceptron with LoRA. Here is the definition:
Copied
from torch import nn
class MLP(nn.Module):
    def __init__(self, num_units_hidden=2000):
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(20, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, 2),
            nn.LogSoftmax(dim=-1),
        )

    def forward(self, X):
        return self.seq(X)
This is a straightforward multilayer perceptron with an input layer, a hidden layer, and an output layer.
For this toy example, we choose an exceedingly large number of hidden units to highlight the efficiency gains
from PEFT, but those gains are in line with more realistic examples.
There are a few linear layers in this model that could be tuned with LoRA. When working with common 🤗 Transformers
models, PEFT will know which layers to apply LoRA to, but in this case, it is up to us as a user to choose the layers.
To determine the names of the layers to tune:
Copied
print([(n, type(m)) for n, m in MLP().named_modules()])
This should print:
Copied
[('', __main__.MLP),
('seq', torch.nn.modules.container.Sequential),
('seq.0', torch.nn.modules.linear.Linear),
('seq.1', torch.nn.modules.activation.ReLU),
('seq.2', torch.nn.modules.linear.Linear),
('seq.3', torch.nn.modules.activation.ReLU),
('seq.4', torch.nn.modules.linear.Linear),
('seq.5', torch.nn.modules.activation.LogSoftmax)]
Let’s say we want to apply LoRA to the input layer and to the hidden layer, those are 'seq.0' and 'seq.2'. Moreover,
let’s assume we want to update the output layer without LoRA, that would be 'seq.4'. The corresponding config would
be:
Copied
from peft import LoraConfig
config = LoraConfig(
target_modules=["seq.0", "seq.2"],
modules_to_save=["seq.4"],
)
With that, we can create our PEFT model and check the fraction of parameters trained:
Copied
from peft import get_peft_model
model = MLP()
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
# prints trainable params: 56,164 || all params: 4,100,164 || trainable%: 1.369798866581922
Finally, we can use any training framework we like, or write our own fit loop, to train the peft_model.
For a complete example, check out this notebook.
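If you want a quick sanity check before moving to a real framework, here is a minimal sketch of such a fit loop on random data; it reuses the peft_model from the snippet above, and the optimizer and synthetic dataset are arbitrary choices:
Copied
import torch

X = torch.rand(64, 20)               # random inputs matching the MLP's 20 input features
y = torch.randint(0, 2, (64,))       # random binary labels
optimizer = torch.optim.Adam(peft_model.parameters(), lr=2e-3)
criterion = torch.nn.NLLLoss()       # the MLP ends with LogSoftmax

for epoch in range(10):
    optimizer.zero_grad()
    loss = criterion(peft_model(X), y)
    loss.backward()
    optimizer.step()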
timm model
The timm library contains a large number of pretrained computer vision models.
Those can also be fine-tuned with PEFT. Let’s check out how this works in practice.
To start, ensure that timm is installed in the Python environment:
Copied
python -m pip install -U timm
Next we load a timm model for an image classification task:
Copied
import timm
num_classes = ...
model_id = "timm/poolformer_m36.sail_in1k"
model = timm.create_model(model_id, pretrained=True, num_classes=num_classes)
Again, we need to make a decision about what layers to apply LoRA to. Since LoRA supports 2D conv layers, and since
those are a major building block of this model, we should apply LoRA to the 2D conv layers. To identify the names of
those layers, let’s look at all the layer names:
Copied
print([(n, type(m)) for n, m in model.named_modules()])
This will print a very long list; we’ll only show the first few:
Copied
[('', timm.models.metaformer.MetaFormer),
('stem', timm.models.metaformer.Stem),
('stem.conv', torch.nn.modules.conv.Conv2d),
('stem.norm', torch.nn.modules.linear.Identity),
('stages', torch.nn.modules.container.Sequential),
('stages.0', timm.models.metaformer.MetaFormerStage),
('stages.0.downsample', torch.nn.modules.linear.Identity),
('stages.0.blocks', torch.nn.modules.container.Sequential),
('stages.0.blocks.0', timm.models.metaformer.MetaFormerBlock),
('stages.0.blocks.0.norm1', timm.layers.norm.GroupNorm1),
('stages.0.blocks.0.token_mixer', timm.models.metaformer.Pooling),
('stages.0.blocks.0.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d),
('stages.0.blocks.0.drop_path1', torch.nn.modules.linear.Identity),
('stages.0.blocks.0.layer_scale1', timm.models.metaformer.Scale),
('stages.0.blocks.0.res_scale1', torch.nn.modules.linear.Identity),
('stages.0.blocks.0.norm2', timm.layers.norm.GroupNorm1),
('stages.0.blocks.0.mlp', timm.layers.mlp.Mlp),
('stages.0.blocks.0.mlp.fc1', torch.nn.modules.conv.Conv2d),
('stages.0.blocks.0.mlp.act', torch.nn.modules.activation.GELU),
('stages.0.blocks.0.mlp.drop1', torch.nn.modules.dropout.Dropout),
('stages.0.blocks.0.mlp.norm', torch.nn.modules.linear.Identity),
('stages.0.blocks.0.mlp.fc2', torch.nn.modules.conv.Conv2d),
('stages.0.blocks.0.mlp.drop2', torch.nn.modules.dropout.Dropout),
('stages.0.blocks.0.drop_path2', torch.nn.modules.linear.Identity),
('stages.0.blocks.0.layer_scale2', timm.models.metaformer.Scale),
('stages.0.blocks.0.res_scale2', torch.nn.modules.linear.Identity),
('stages.0.blocks.1', timm.models.metaformer.MetaFormerBlock),
('stages.0.blocks.1.norm1', timm.layers.norm.GroupNorm1),
('stages.0.blocks.1.token_mixer', timm.models.metaformer.Pooling),
('stages.0.blocks.1.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d),
...
('head.global_pool.flatten', torch.nn.modules.linear.Identity),
('head.norm', timm.layers.norm.LayerNorm2d),
('head.flatten', torch.nn.modules.flatten.Flatten),
('head.drop', torch.nn.modules.linear.Identity),
('head.fc', torch.nn.modules.linear.Linear)]
Upon closer inspection, we see that the 2D conv layers have names such as "stages.0.blocks.0.mlp.fc1" and
"stages.0.blocks.0.mlp.fc2". How can we match those layer names specifically? You can write a regular
expression to match the layer names. For our case, the regex
r".*\.mlp\.fc\d" should do the job.
Furthermore, as in the first example, we should ensure that the output layer, in this case the classification head, is
also updated. Looking at the end of the list printed above, we can see that it’s named 'head.fc'. With that in mind,
here is our LoRA config:
Copied
config = LoraConfig(target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"])
Then we only need to create the PEFT model by passing our base model and the config to get_peft_model:
Copied
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
# prints trainable params: 1,064,454 || all params: 56,467,974 || trainable%: 1.88505789139876
This shows us that we only need to train less than 2% of all parameters, which is a huge efficiency gain.
For a complete example, check out this notebook.
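If you then want to keep the result, note that only the adapter weights (plus the modules_to_save head) need to be stored; the frozen timm weights are not duplicated. A hedged sketch, where the output directory name is arbitrary:
Copied
# Saves only the small LoRA adapter and the "head.fc" module set as trainable above.
peft_model.save_pretrained("poolformer_m36_lora")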
Installation
Before you start, you will need to set up your environment, install the appropriate packages, and configure 🤗 PEFT. 🤗 PEFT is tested on Python 3.8+.
🤗 PEFT is available on PyPI, as well as GitHub:
PyPI
To install 🤗 PEFT from PyPI:
Copied
pip install peft
Source
New features that haven’t been released yet are added every day, which also means there may be some bugs. To try them out, install from the GitHub repository:
Copied
pip install git+https://github.com/huggingface/peft
If you’re working on contributing to the library or wish to play with the source code and see live
results as you run the code, an editable version can be installed from a locally-cloned version of the
repository:
Copied
git clone https://github.com/huggingface/peft
cd peft
pip install -e .
LoRA for semantic similarity tasks
Low-Rank Adaptation (LoRA) is a reparametrization method that aims to reduce the number of trainable parameters with low-rank representations. The weight matrix is broken down into low-rank matrices that are trained and updated. All the pretrained model parameters remain frozen. After training, the low-rank matrices are added back to the original weights. This makes it more efficient to store and train a LoRA model because there are significantly fewer parameters.
💡 Read LoRA: Low-Rank Adaptation of Large Language Models to learn more about LoRA.
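To make the parameter savings concrete, here is a toy numerical sketch of the decomposition described above (the dimensions and rank are arbitrary and unrelated to e5-large-v2):
Copied
import torch

d_out, d_in, r = 1024, 1024, 8
W = torch.randn(d_out, d_in)        # frozen pretrained weight
A = torch.randn(r, d_in) * 0.01     # trainable low-rank factor
B = torch.zeros(d_out, r)           # trainable, zero-initialized so B @ A starts at zero
W_merged = W + B @ A                # "adding back" the low-rank update after training

print((A.numel() + B.numel()) / W.numel())  # ~0.016, i.e. ~1.6% of the full matrix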
In this guide, we’ll be using a LoRA script to fine-tune an intfloat/e5-large-v2 model on the smangrul/amazon_esci dataset for semantic similarity tasks. Feel free to explore the script to learn how things work in greater detail!
Setup
Start by installing 🤗 PEFT from source, and then navigate to the directory containing the training script for fine-tuning a semantic similarity model with LoRA:
Copied
cd peft/examples/feature_extraction
Install all the required libraries with:
Copied
pip install -r requirements.txt
Setup
Let’s start by importing all the necessary libraries you’ll need:
🤗 Transformers for loading the intfloat/e5-large-v2 model and tokenizer
🤗 Accelerate for the training loop
🤗 Datasets for loading and preparing the smangrul/amazon_esci dataset for training and inference
🤗 Evaluate for evaluating the model’s performance
🤗 PEFT for setting up the LoRA configuration and creating the PEFT model
🤗 huggingface_hub for uploading the trained model to HF hub
hnswlib for creating the search index and doing fast approximate nearest neighbor search
It is assumed that PyTorch with CUDA support is already installed
|
5c1fa8dadc86e3a282900d1df405f9cb.txt
|
5c1fa8dadc86e3a282900d1df405f9cb.txt_chunk_6
|
earch index and doing fast approximate nearest neighbor search
It is assumed that PyTorch with CUDA support is already installed.
Train
Launch the training script with accelerate launch and pass your hyperparameters along with the --use_peft argument to enable LoRA.
This guide uses the following LoraConfig:
Copied
peft_config = LoraConfig(
r=8,
lora_alpha=16,
bias="none",
task_type=TaskType.FEATURE_EXTRACTION,
target_modules=["key", "query", "value"],
)
Here’s what a full set of script arguments may look like when running in Colab on a V100 GPU with standard RAM:
Copied
accelerate launch \
--mixed_precision="fp16" \
peft_lora_embedding_semantic_search.py \
--dataset_name="smangrul/amazon_esci" \
--max_length=70 --model_name_or_path="intfloat/e5-large-v2" \
--per_device_train_batch_size=64 \
--per_device_eval_batch_size=128 \
--learning_rate=5e-4 \
--weight_decay=0.0 \
--num_train_epochs 3 \
--gradient_accumulation_steps=1 \
--output_dir="results/peft_lora_e5_ecommerce_semantic_search_colab" \
--seed=42 \
--push_to_hub \
--hub_model_id="smangrul/peft_lora_e5_ecommerce_semantic_search_colab" \
--with_tracking \
--report_to="wandb" \
--use_peft \
--checkpointing_steps "epoch"
Dataset for semantic similarity
The dataset we’ll be using is a small subset of the esci-data dataset (it can be found on Hub at smangrul/amazon_esci).
Each sample contains a tuple of (query, product_title, relevance_label) where relevance_label is 1 if the product matches the intent of the query, otherwise it is 0.
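You can quickly inspect a sample to see this structure for yourself; the snippet below assumes the column names match the description above and that the dataset exposes a train split:
Copied
from datasets import load_dataset

dataset = load_dataset("smangrul/amazon_esci")
print(dataset)
print(dataset["train"][0])  # expected keys: query, product_title, relevance_label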
Our task is to build an embedding model that can retrieve semantically similar products given a product query.
This is usually the first stage in building a product search engine to retrieve all the potentially relevant products of a given query.
Typically, this involves using Bi-Encoder models to cross-join the query and millions of products which could blow up quickly.
Instead, you can use a Transformer model to retrieve the top K nearest similar products for a given query by
embedding the query and products in the same latent embedding space.
The millions of products are embedded offline to create a search index.
At run time, only the query is embedded by the model, and products are retrieved from the search index with a
fast approximate nearest neighbor search library such as FAISS or HNSWlib.
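As a small illustration of that offline-index/online-query flow with hnswlib (the embedding dimension, index parameters, and random vectors are arbitrary stand-ins for real product embeddings):
Copied
import hnswlib
import numpy as np

dim, num_products = 64, 1000
product_embs = np.float32(np.random.random((num_products, dim)))  # offline product embeddings

index = hnswlib.Index(space="cosine", dim=dim)
index.init_index(max_elements=num_products, ef_construction=100, M=16)
index.add_items(product_embs, np.arange(num_products))

query_emb = np.float32(np.random.random((1, dim)))                # embedded at run time
labels, distances = index.knn_query(query_emb, k=5)               # top-5 nearest products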
The next stage involves reranking the retrieved list of products to return the most relevant ones;
this stage can utilize cross-encoder based models as the cross-join between the query and a limited set of retrieved products.
The diagram below from awesome-semantic-search outlines a rough semantic search pipeline:
For this task guide, we will explore the first stage of training an embedding model to predict semantically similar products
given a product query.
Training script deep dive
We finetune e5-large-v2 which tops the MTEB benchmark using PEFT-LoRA.
AutoModelForSentenceEmbedding returns the query and product embeddings, and the mean_pooling function pools them across the sequence dimension and normalizes them:
Copied
class AutoModelForSentenceEmbedding(nn.Module):
    def __init__(self, model_name, tokenizer, normalize=True):
        super(AutoModelForSentenceEmbedding, self).__init__()
        self.model = AutoModel.from_pretrained(model_name)
        self.normalize = normalize
        self.tokenizer = tokenizer

    def forward(self, **kwargs):
        model_output = self.model(**kwargs)
        embeddings = self.mean_pooling(model_output, kwargs["attention_mask"])
        if self.normalize:
            embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
        return embeddings

    def mean_pooling(self, model_output, attention_mask):
        token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

    def __getattr__(self, name: str):
        """Forward missing attributes to the wrapped module."""
        try:
            return super().__getattr__(name)  # defer to nn.Module's logic
        except AttributeError:
            return getattr(self.model, name)

def get_cosine_embeddings(query_embs, product_embs):
    return torch.sum(query_embs * product_embs, axis=1)

def get_loss(cosine_score, labels):
    return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0)))
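As a rough usage sketch of this wrapper outside the training script (it reuses the class and helper defined above; the example strings are made up, and the imports mirror what the full script needs):
Copied
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("intfloat/e5-large-v2")
model = AutoModelForSentenceEmbedding("intfloat/e5-large-v2", tokenizer)

queries = ["bluetooth headphones"]
products = ["Wireless Bluetooth Over-Ear Headphones"]
q = tokenizer(queries, padding=True, truncation=True, return_tensors="pt")
p = tokenizer(products, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    query_embs = model(**q)
    product_embs = model(**p)
print(get_cosine_embeddings(query_embs, product_embs))  # higher score = more similar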