"""Implementation of the paper:
LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention
https://arxiv.org/abs/2303.16199

How the adapter plugs into each adapted transformer block (the gated output of prefix cross-attention is added
to the output of the regular self-attention):

        ┌─────────┐
        │    x    │
        └─────────┘
             |
             ▼
    ┌──────────────────┐
    │  self-attention  │
    └──────────────────┘
             |
             ▼
           ╭───╮    ┌───────────────┐    ╭───╮    ┌────────────────────────┐
           │ + │ ◄──┤ gating factor ├────┤ x ├────┤ prefix cross-attention │
           ╰───╯    └───────────────┘    ╰───╯    └────────────────────────┘
             |
             ▼

Prefix cross-attention:

                  ┌──────────┐
                  │  prefix  │
                  └──────────┘
                       |
                       ▼
             ┌─────────────────────┐
             │  linear projection  │
             └─────────────────────┘
            /           |           \
         ▼              ▼                  ▼
    ┌─────────┐  ┌──────────────┐  ┌────────────────┐
    │  query  │  │  prefix key  │  │  prefix value  │
    └─────────┘  └──────────────┘  └────────────────┘
           \            |               /
             ▼          ▼            ▼
       ┌────────────────────────────────┐
       │  scaled dot-product attention  │
       └────────────────────────────────┘
In order to inject learnable information from the prefix into the pretrained weights, the output of
self-attention is summed with the output of prefix cross-attention (scaled by the gating factor). Prefix
cross-attention needs a `query` (produced inside self-attention by the linear projection of the input), plus a
`prefix key` and a `prefix value` (produced by the linear projection of the prefix).

The output of prefix cross-attention is multiplied by the gating factor, a learnable parameter that protects the
pretrained weights from being disrupted by the randomly initialized adapter tensors. The factor is initialized
with zeros, so the adaption prompts add no noise during the early stage of training.

More about it: https://lightning.ai/pages/community/article/understanding-llama-adapters/

Note on the implementation: in the paper the adapter's prefix is concatenated with the input, while here the
outputs of self-attention and prefix cross-attention are summed. Both variants are mathematically equivalent:
https://github.com/ZrrSkywalker/LLaMA-Adapter/issues/47
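
Example (a minimal usage sketch; weight loading, tokenization and the training loop are omitted):

    model = LLaMA.from_name("7B")
    mark_only_adapter_as_trainable(model)  # freeze everything except `adapter_wte` and `gating_factor`
    ...  # run the fine-tuning loop on the trainable parameters
    adapter_weights = adapter_state_from_state_dict(model.state_dict())  # only the adapter tensors, for saving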
"""
# mypy: ignore-errors
from dataclasses import dataclass
from typing import Optional, Tuple, List, Union

import torch
import torch.nn as nn
from torch.nn import functional as F

import lit_llama.model as llama
from lit_llama.model import build_rope_cache, apply_rope, RMSNorm, MLP, KVCache, RoPECache


@dataclass
class LLaMAConfig(llama.LLaMAConfig):
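    # `adapter_prompt_length`: number of learnable prefix ("adaption prompt") tokens per adapted block
    # `adapter_start_layer`: index of the first block that receives the adapter; earlier blocks stay unchanged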
adapter_prompt_length: int = 10
    adapter_start_layer: int = 2


class CausalSelfAttention(nn.Module):
"""A modification of `lit_llama.model.CausalSelfAttention` that adds the attention
over the adaption prompt."""
def __init__(self, config: LLaMAConfig, block_idx: int) -> None:
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads, but in a batch
self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=False)
# output projection
self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=False)
if block_idx >= config.adapter_start_layer:
# adapter embedding layer
self.adapter_wte = nn.Embedding(config.adapter_prompt_length, config.n_embd)
# a learnable gating factor (to avoid potential disruption of pretrained weights) initialized with zeros (to
# avoid noise from adaption prompts at the early training stage)
self.gating_factor = torch.nn.Parameter(torch.zeros(1, config.n_head, 1, 1))
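            # shape (1, n_head, 1, 1): one gate per attention head, broadcast over batch, sequence and head-size dims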
self.n_head = config.n_head
self.n_embd = config.n_embd
self.block_size = config.block_size
self.block_idx = block_idx
self.adapter_prompt_length = config.adapter_prompt_length
self.adapter_start_layer = config.adapter_start_layer
def forward(
self,
x: torch.Tensor,
rope: RoPECache,
mask: torch.Tensor,
max_seq_length: int,
input_pos: Optional[torch.Tensor] = None,
kv_cache: Optional[KVCache] = None,
adapter_kv_cache: Optional[KVCache] = None,
) -> Tuple[torch.Tensor, Optional[KVCache], Optional[KVCache]]:
# notation:
# - B | batch
# - T | time-step (sequence length)
# - C | embeddings size (n_embd) = head size * num heads
# - hs | head size
# - nh | number of heads
B, T, C = x.size()
        # instead of calculating `query`, `key` and `value` by separately multiplying input `x` with the
        # corresponding weight matrices, do it (for all heads at once) in a single multiplication with a matrix of
        # 3x size (concatenated weights for q, k, v) and then split the result along the embedding-size dimension
q, k, v = self.c_attn(x).split(self.n_embd, dim=2) # (B, T, 3 * C) --> 3 * (B, T, C)
        # in order to later move the num_heads (nh) dimension right after the batch (B) dimension, we first split
        # the embedding size (C) dimension into num_heads (nh) and head_size (hs)
head_size = C // self.n_head
k = k.view(B, T, self.n_head, head_size)
q = q.view(B, T, self.n_head, head_size)
v = v.view(B, T, self.n_head, head_size)
# "Unlike standard positional embeddings rotary embeddings must be applied at every layer"
q = apply_rope(q, rope) # (B, T, nh, hs)
k = apply_rope(k, rope) # (B, T, nh, hs)
        # now `key`, `query` and `value` tensors are correctly represented: for each element in a batch (B)
# there is a number of heads (nh) and for each head there is a sequence of elements (T), each of them is
# represented by a vector of size `hs`
k = k.transpose(1, 2) # (B, nh, T, hs)
q = q.transpose(1, 2) # (B, nh, T, hs)
v = v.transpose(1, 2) # (B, nh, T, hs)
if kv_cache is not None:
cache_k, cache_v = kv_cache # 2 * (B, nh, max_seq_length, hs)
# check if reached token limit
if input_pos[-1] >= max_seq_length:
# if we reached token limit and thus there is no space to put newly calculated `key` and `value`
# right next to cached ones, we need to rotate cache tensor along `max_seq_length` dimension by one
# element to the left: this will free up space for new `key` and `value`
input_pos = torch.tensor(max_seq_length - 1, device=input_pos.device)
# shift 1 position to the left
cache_k = torch.roll(cache_k, -1, dims=2)
cache_v = torch.roll(cache_v, -1, dims=2)
k = cache_k.index_copy(2, input_pos, k) # (B, nh, max_seq_length, hs)
v = cache_v.index_copy(2, input_pos, v) # (B, nh, max_seq_length, hs)
kv_cache = k, v
# efficient attention using Flash Attention CUDA kernels
        # (B, nh, T, hs) @ (B, nh, T, hs).mT --> (B, nh, T, T) @ (B, nh, T, hs) --> (B, nh, T, hs)
y = F.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0) # (B, nh, T, hs)
# "Adapters are applied to the topmost layers to better tune the language
# representations with higher-level semantics".
if self.block_idx >= self.adapter_start_layer:
if adapter_kv_cache is not None:
ak, av = adapter_kv_cache # 2 * (B, nh, aT, hs)
else:
prefix = self.adapter_wte.weight.reshape(1, self.adapter_prompt_length, self.n_embd)
aT = prefix.size(1)
_, ak, av = self.c_attn(prefix).split(self.n_embd, dim=2) # (1, aT, 3 * C) --> 3 * (1, aT, C)
ak = ak.view(1, aT, self.n_head, head_size).repeat(B, 1, 1, 1).transpose(1, 2) # (B, nh, aT, hs)
av = av.view(1, aT, self.n_head, head_size).repeat(B, 1, 1, 1).transpose(1, 2) # (B, nh, aT, hs)
adapter_kv_cache = (ak, av)
            # Apply cross-attention with `query`, `adapter_key`, `adapter_value` and sum the output with the
            # output of the self-attention step. This is mathematically equivalent to concatenating the prefix
            # with the input, as done in the paper.
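            # (Equivalence sketch, not executed here: attending over torch.cat((ak, k), dim=2) and
            # torch.cat((av, v), dim=2) with a mask that always admits the prefix positions yields a
            # softmax-weighted mix of `y` and `ay`; the learnable, zero-initialized `gating_factor` replaces
            # that softmax split, so the prefix contributes nothing at the start of training.)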
amask = torch.ones(q.shape[-2], ak.shape[-2], dtype=torch.bool, device=x.device) # (T, aT)
            # (B, nh, T, hs) @ (B, nh, aT, hs).mT --> (B, nh, T, aT) @ (B, nh, aT, hs) --> (B, nh, T, hs)
ay = F.scaled_dot_product_attention(q, ak, av, attn_mask=amask, dropout_p=0.0, is_causal=False) # (B, nh, T, hs)
y = y + self.gating_factor * ay
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.c_proj(y) # (B, T, C)
return y, kv_cache, adapter_kv_cache
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
"""For backward compatibility with old checkpoints that have a single gating value for all heads."""
name = prefix + "gating_factor"
if name in state_dict:
tensor = state_dict[name]
# in case we are loading with `utils.lazy_load()`
tensor = tensor._load_tensor() if hasattr(tensor, "_load_tensor") else tensor
if len(tensor.shape) < 4:
# For old checkpoints with unified gating value
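                # e.g. a single stored gate of shape (1,) becomes (1, n_head, 1, 1), repeating the value across heads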
state_dict[name] = tensor.reshape(1, 1, 1, 1).repeat(1, self.n_head, 1, 1)
else:
state_dict[name] = tensor
        return super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)


class Block(nn.Module):
"""The implementation is identical to `lit_llama.model.Block` with the exception that
we replace the attention layer where adaption is implemented."""
def __init__(self, config: LLaMAConfig, block_idx: int) -> None:
super().__init__()
self.rms_1 = RMSNorm(config.n_embd)
self.attn = CausalSelfAttention(config, block_idx)
self.rms_2 = RMSNorm(config.n_embd)
self.mlp = MLP(config)
def forward(
self,
x: torch.Tensor,
rope: RoPECache,
mask: torch.Tensor,
max_seq_length: int,
input_pos: Optional[torch.Tensor] = None,
kv_cache: Optional[KVCache] = None,
adapter_kv_cache: Optional[KVCache] = None,
) -> Tuple[torch.Tensor, Optional[KVCache], Optional[KVCache]]:
h, new_kv_cache, new_adapter_kv_cache = self.attn(
self.rms_1(x), rope, mask, max_seq_length, input_pos, kv_cache, adapter_kv_cache
)
x = x + h
x = x + self.mlp(self.rms_2(x))
        return x, new_kv_cache, new_adapter_kv_cache


class LLaMA(llama.LLaMA):
"""The implementation is identical to `lit_llama.model.LLaMA` with the exception that
the `Block` saves the layer index and passes it down to the attention layer."""
def __init__(self, config: LLaMAConfig) -> None:
nn.Module.__init__(self)
assert config.vocab_size is not None
assert config.block_size is not None
self.config = config
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.transformer = nn.ModuleDict(
dict(
wte=nn.Embedding(config.vocab_size, config.n_embd),
h=nn.ModuleList(Block(config, i) for i in range(config.n_layer)),
ln_f=RMSNorm(config.n_embd),
)
)
self.rope_cache: Optional[RoPECache] = None
self.mask_cache: Optional[torch.Tensor] = None
self.kv_caches: List[KVCache] = []
self.adapter_kv_caches: List[KVCache] = []
@classmethod
def from_name(cls, name: str):
return cls(LLaMAConfig.from_name(name))
def reset_cache(self) -> None:
super().reset_cache()
self.adapter_kv_caches.clear()
def forward(
self, idx: torch.Tensor, max_seq_length: Optional[int] = None, input_pos: Optional[torch.Tensor] = None
) -> Union[torch.Tensor, Tuple[torch.Tensor, List[KVCache]]]:
B, T = idx.size()
block_size = self.config.block_size
if max_seq_length is None:
max_seq_length = block_size
assert T <= max_seq_length, f"Cannot forward sequence of length {T}, max seq length is only {max_seq_length}"
assert max_seq_length <= block_size, f"Cannot attend to {max_seq_length}, block size is only {block_size}"
assert T <= block_size, f"Cannot forward sequence of length {T}, block size is only {block_size}"
if self.rope_cache is None:
self.rope_cache = self.build_rope_cache(idx) # (block_size, head_size / 2, 2)
if self.mask_cache is None:
self.mask_cache = self.build_mask_cache(idx) # (1, 1, block_size, block_size)
if input_pos is not None:
rope = self.rope_cache.index_select(0, input_pos)
mask = self.mask_cache.index_select(2, input_pos)
mask = mask[:, :, :, :max_seq_length]
else:
rope = self.rope_cache[:T]
mask = self.mask_cache[:, :, :T, :T]
# forward the model itself
x = self.transformer.wte(idx) # token embeddings of shape (B, T, n_embd)
if input_pos is None: # proxy for use_cache=False
for block in self.transformer.h:
x, *_ = block(x, rope, mask, max_seq_length)
else:
if not self.kv_caches:
head_size = self.config.n_embd // self.config.n_head
cache_shape = (B, self.config.n_head, max_seq_length, head_size)
self.kv_caches = [
(torch.zeros(cache_shape, device=x.device, dtype=x.dtype), torch.zeros(cache_shape, device=x.device, dtype=x.dtype))
for _ in range(self.config.n_layer)
]
if not self.adapter_kv_caches:
self.adapter_kv_caches = [None for _ in range(self.config.n_layer)]
for i, block in enumerate(self.transformer.h):
x, self.kv_caches[i], self.adapter_kv_caches[i] = block(
x, rope, mask, max_seq_length, input_pos, self.kv_caches[i], self.adapter_kv_caches[i]
)
x = self.transformer.ln_f(x) # (B, T, n_embd)
logits = self.lm_head(x) # (B, T, vocab_size)
return logits
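
    # A minimal generation sketch (hypothetical usage, not executed here; batching, sampling and real prompts
    # are left out). The first call pre-fills the kv/adapter caches, later calls decode one token at a time:
    #
    #   model = LLaMA.from_name("7B")
    #   idx = torch.tensor([[1, 2, 3]])                                      # prompt token ids, shape (B, T)
    #   logits = model(idx, max_seq_length=64, input_pos=torch.arange(3))    # pre-fill, returns (B, T, vocab_size)
    #   nxt = logits[:, -1].argmax(dim=-1, keepdim=True)                     # greedy next token, shape (B, 1)
    #   logits = model(nxt, max_seq_length=64, input_pos=torch.tensor([3]))  # one-token step, reuses the caches
    #   model.reset_cache()                                                  # clear kv/adapter caches for a new prompt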
def mark_only_adapter_as_trainable(model: LLaMA) -> None:
"""Sets `requires_grad=False` for all non-adapter weights."""
for name, param in model.named_parameters():
param.requires_grad = "adapter_wte" in name or "gating_factor" in name
def adapter_state_from_state_dict(state_dict: dict) -> dict:
"""Returns the model state dict with only the adapter weights for saving."""
return {name: param for name, param in state_dict.items() if "adapter_wte" in name or "gating_factor" in name}
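
# Saving/loading sketch (hypothetical usage): persist only the adapter tensors after fine-tuning and load them
# back on top of a pretrained checkpoint with `strict=False`, since the file intentionally omits all frozen weights.
#
#   torch.save(adapter_state_from_state_dict(model.state_dict()), "adapter.pth")
#   model.load_state_dict(torch.load("adapter.pth"), strict=False)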