"""rotary_embedding.py - Rotary Embedding based on https://github.com/lucidrains/rotary-embedding-torch""" | |
from typing import Literal, Union, Optional | |
from math import pi, log | |
from einops import rearrange, repeat | |
import torch | |
from torch.nn import Module, ModuleList | |
from torch.cuda.amp import autocast | |
from torch import nn, einsum, broadcast_tensors, Tensor | |
# helper functions

def exists(val):
    return val is not None

def default(val, d):
    return val if exists(val) else d
# broadcat, as tortoise-tts was using it

def broadcat(tensors, dim=-1):
    broadcasted_tensors = broadcast_tensors(*tensors)
    return torch.cat(broadcasted_tensors, dim=dim)
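# a minimal sketch of what broadcat does (shapes below are illustrative, not from the model):
# two grids of shapes (4, 1, 8) and (1, 6, 8) each broadcast to (4, 6, 8),
# then concatenate along the last dim to (4, 6, 16)
def _demo_broadcat():
    a = torch.zeros(4, 1, 8)
    b = torch.zeros(1, 6, 8)
    assert broadcat([a, b], dim=-1).shape == (4, 6, 16)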
# rotary embedding helper functions

def rotate_half(x):
    x = rearrange(x, '... (d r) -> ... d r', r=2)
    x1, x2 = x.unbind(dim=-1)
    x = torch.stack((-x2, x1), dim=-1)
    return rearrange(x, '... d r -> ... (d r)')
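# a minimal sketch of what rotate_half computes (values are hypothetical):
# each adjacent pair (x1, x2) becomes (-x2, x1), i.e. a 90-degree rotation per pair
def _demo_rotate_half():
    x = torch.tensor([1., 2., 3., 4.])
    assert torch.equal(rotate_half(x), torch.tensor([-2., 1., -4., 3.]))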
def apply_rotary_emb(freqs, t, start_index=0, scale=1., seq_dim=-2):
    """Applies rotary embeddings to the feature slice t[..., start_index:start_index + rot_dim],
    leaving features on either side of that slice untouched."""
    if t.ndim == 3:
        seq_len = t.shape[seq_dim]
        freqs = freqs[-seq_len:].to(t)
    rot_dim = freqs.shape[-1]
    end_index = start_index + rot_dim
    assert rot_dim <= t.shape[-1], \
        f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
    t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]
    t = (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale)
    return torch.cat((t_left, t, t_right), dim=-1)
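# a minimal sketch of a partial rotation (all shapes and values are illustrative):
# with freqs of width 4, only the first 4 features of each position are rotated;
# zero angles give cos=1, sin=0, so the rotation reduces to the identity here
def _demo_apply_rotary_emb():
    t = torch.ones(1, 10, 8)    # (batch, seq, dim)
    freqs = torch.zeros(10, 4)  # zero angles -> identity rotation on first 4 features
    out = apply_rotary_emb(freqs, t)
    assert out.shape == t.shape and torch.allclose(out, t)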
# learned rotation helpers

def apply_learned_rotations(rotations, t, start_index=0, freq_ranges=None):
    if exists(freq_ranges):
        rotations = einsum('..., f -> ... f', rotations, freq_ranges)
        rotations = rearrange(rotations, '... r f -> ... (r f)')
    rotations = repeat(rotations, '... n -> ... (n r)', r=2)
    return apply_rotary_emb(rotations, t, start_index=start_index)
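# a minimal sketch of applying learned rotations (hypothetical shapes):
# one angle per position per rotation pair, duplicated across each (x1, x2) pair
def _demo_apply_learned_rotations():
    t = torch.ones(1, 10, 8)           # (batch, seq, dim)
    rotations = torch.zeros(1, 10, 4)  # zero angles -> identity rotation
    assert torch.allclose(apply_learned_rotations(rotations, t), t)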
# classes

class RotaryEmbedding(Module):
    def __init__(self,
                 dim,
                 custom_freqs: Optional[Tensor] = None,
                 freqs_for: Literal['lang', 'pixel', 'constant'] = 'lang',
                 theta=10000,
                 max_freq=10,
                 num_freqs=1,
                 learned_freq=False,
                 use_xpos=False,
                 xpos_scale_base=512,
                 interpolate_factor=1.,
                 theta_rescale_factor=1.,
                 seq_before_head_dim=False,
                 cache_if_possible=True):
        super().__init__()
        # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence lengths without fine-tuning
        # has some connection to NTK literature
        # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
        theta *= theta_rescale_factor**(dim / (dim - 2))
        self.freqs_for = freqs_for
        if exists(custom_freqs):
            freqs = custom_freqs
        elif freqs_for == 'lang':
            freqs = 1. / (theta**(torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
        elif freqs_for == 'pixel':
            freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
        elif freqs_for == 'constant':
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f"freqs_for must be 'lang', 'pixel' or 'constant', but got {freqs_for}")
        self.cache_if_possible = cache_if_possible
        self.tmp_store('cached_freqs', None)
        self.tmp_store('cached_scales', None)
        self.freqs = nn.Parameter(freqs, requires_grad=learned_freq)
        self.learned_freq = learned_freq
        # dummy for device
        self.tmp_store('dummy', torch.tensor(0))
        # default sequence dimension
        self.seq_before_head_dim = seq_before_head_dim
        self.default_seq_dim = -3 if seq_before_head_dim else -2
        # interpolation factors
        assert interpolate_factor >= 1.
        self.interpolate_factor = interpolate_factor
        # xpos
        self.use_xpos = use_xpos
        if not use_xpos:
            self.tmp_store('scale', None)
            return
        scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
        self.scale_base = xpos_scale_base
        self.tmp_store('scale', scale)
    @property
    def device(self):
        return self.dummy.device

    def tmp_store(self, key, value):
        self.register_buffer(key, value, persistent=False)
    def get_seq_pos(self, seq_len, device, dtype, offset=0):
        return (torch.arange(seq_len, device=device, dtype=dtype) + offset) / self.interpolate_factor
    def rotate_queries_or_keys(self, t, seq_dim=None, offset=0, freq_seq_len=None):
        seq_dim = default(seq_dim, self.default_seq_dim)
        assert not self.use_xpos, 'you must use the `.rotate_queries_and_keys` method instead and pass in both queries and keys, for length extrapolatable rotary embeddings'
        device, dtype, seq_len = t.device, t.dtype, t.shape[seq_dim]
        if exists(freq_seq_len):
            assert freq_seq_len >= seq_len
            seq_len = freq_seq_len
        freqs = self.forward(self.get_seq_pos(seq_len, device=device, dtype=dtype, offset=offset),
                             seq_len=seq_len,
                             offset=offset)
        if seq_dim == -3:
            freqs = rearrange(freqs, 'n d -> n 1 d')
        return apply_rotary_emb(freqs, t, seq_dim=seq_dim)
    def rotate_queries_with_cached_keys(self, q, k, seq_dim=None, offset=0):
        seq_dim = default(seq_dim, self.default_seq_dim)
        q_len, k_len = q.shape[seq_dim], k.shape[seq_dim]
        assert q_len <= k_len
        rotated_q = self.rotate_queries_or_keys(q, seq_dim=seq_dim, freq_seq_len=k_len)
        rotated_k = self.rotate_queries_or_keys(k, seq_dim=seq_dim)
        rotated_q = rotated_q.type(q.dtype)
        rotated_k = rotated_k.type(k.dtype)
        return rotated_q, rotated_k
    def rotate_queries_and_keys(self, q, k, seq_dim=None):
        seq_dim = default(seq_dim, self.default_seq_dim)
        assert self.use_xpos
        device, dtype, seq_len = q.device, q.dtype, q.shape[seq_dim]
        seq = self.get_seq_pos(seq_len, dtype=dtype, device=device)
        freqs = self.forward(seq, seq_len=seq_len)
        scale = self.get_scale(seq, seq_len=seq_len).to(dtype)
        if seq_dim == -3:
            freqs = rearrange(freqs, 'n d -> n 1 d')
            scale = rearrange(scale, 'n d -> n 1 d')
        rotated_q = apply_rotary_emb(freqs, q, scale=scale, seq_dim=seq_dim)
        rotated_k = apply_rotary_emb(freqs, k, scale=scale**-1, seq_dim=seq_dim)
        rotated_q = rotated_q.type(q.dtype)
        rotated_k = rotated_k.type(k.dtype)
        return rotated_q, rotated_k
    def get_scale(self, t: Tensor, seq_len: Optional[int] = None, offset=0):
        assert self.use_xpos
        should_cache = (self.cache_if_possible and exists(seq_len))
        if (should_cache and
                exists(self.cached_scales) and
                (seq_len + offset) <= self.cached_scales.shape[0]):
            return self.cached_scales[offset:(offset + seq_len)]
        scale = 1.
        if self.use_xpos:
            power = (t - len(t) // 2) / self.scale_base
            scale = self.scale**rearrange(power, 'n -> n 1')
            scale = torch.cat((scale, scale), dim=-1)
        if should_cache:
            self.tmp_store('cached_scales', scale)
        return scale
    def get_axial_freqs(self, *dims):
        Colon = slice(None)
        all_freqs = []
        for ind, dim in enumerate(dims):
            if self.freqs_for == 'pixel':
                pos = torch.linspace(-1, 1, steps=dim, device=self.device)
            else:
                pos = torch.arange(dim, device=self.device)
            freqs = self.forward(pos, seq_len=dim)
            all_axis = [None] * len(dims)
            all_axis[ind] = Colon
            new_axis_slice = (Ellipsis, *all_axis, Colon)
            all_freqs.append(freqs[new_axis_slice])
        all_freqs = broadcast_tensors(*all_freqs)
        return torch.cat(all_freqs, dim=-1)
    def forward(self, t: Tensor, seq_len=None, offset=0):
        should_cache = (self.cache_if_possible and
                        not self.learned_freq and
                        exists(seq_len) and
                        self.freqs_for != 'pixel')
        if (should_cache and
                exists(self.cached_freqs) and
                (offset + seq_len) <= self.cached_freqs.shape[0]):
            return self.cached_freqs[offset:(offset + seq_len)].detach()
        freqs = self.freqs
        freqs = einsum('..., f -> ... f', t.type(freqs.dtype), freqs)
        freqs = repeat(freqs, '... n -> ... (n r)', r=2)
        if should_cache:
            self.tmp_store('cached_freqs', freqs.detach())
        return freqs
    # custom method for applying rotary embeddings
    def apply_rotary_custom(self, t: torch.Tensor):
        """Apply rotary embeddings to a single tensor of queries or keys.
        Depending on the freqs type ('lang' or 'pixel'), the rotation differs."""
        if self.freqs_for == 'lang':
            return self.rotate_queries_or_keys(t, seq_dim=-2)
        elif self.freqs_for == 'pixel':
            return apply_rotary_emb(self.get_axial_freqs(t.shape[-2]), t)
        else:
            raise ValueError(f"freqs_for must be 'lang' or 'pixel', but got {self.freqs_for}")
def test_rotary_embedding_lang():
    d = 32  # dim per head
    q = torch.ones(1, 4, 110, 32)  # (B, H, T, D) for multi-head attention
    rdim = d // 2  # rotate only half the features (partial rotary); use d to rotate all
    rotary = RotaryEmbedding(dim=rdim, freqs_for="lang")
    q = rotary.rotate_queries_or_keys(q, seq_dim=-2)
    # visualize
    import matplotlib.pyplot as plt
    plt.imshow(q[0, 0, :, :].numpy().T, origin='lower')
    plt.show()
def test_rotary_embedding_pixel():
    d = 32  # dim per head
    q = torch.ones(1, 4, 128, 32)  # (B*T, H, F, C/H) for multi-head attention
    rdim = d // 2  # rotate only half the features (partial rotary)
    rotary = RotaryEmbedding(dim=rdim, freqs_for="pixel", max_freq=10)
    freqs = rotary.get_axial_freqs(128)
    q = apply_rotary_emb(freqs, q)  # also k, if needed
    # visualize
    import matplotlib.pyplot as plt
    plt.imshow(q[0, 0, :, :].numpy().T, origin='lower')
    plt.show()
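# convenience entry point (an assumed way to run the sketches and smoke tests; not in the original)
if __name__ == "__main__":
    _demo_broadcat()
    _demo_rotate_half()
    _demo_apply_rotary_emb()
    _demo_apply_learned_rotations()
    _demo_xpos()
    test_rotary_embedding_lang()
    test_rotary_embedding_pixel()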