python_code | repo_name | file_path |
---|---|---|
from setuptools import setup, find_packages
setup(
name = 'Mega-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.0',
license='MIT',
description = 'Mega - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/Mega-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism',
'exponential moving average',
'long range arena'
],
install_requires=[
'einops>=0.4',
'scipy',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| Mega-pytorch-main | setup.py |
from mega_pytorch.mega_pytorch import Mega
from mega_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = Mega(
num_tokens = 256,
dim = 512,
depth = 8
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
x = np.array(np.frombuffer(file.read(int(95e6)), dtype = np.uint8))
train_x, valid_x = np.split(x, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
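# sample a random (seq_len + 1)-byte window; the autoregressive wrapper later splits it into input and shifted target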
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
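# accumulate gradients over GRADIENT_ACCUMULATE_EVERY micro-batches before a single optimizer step,
# giving an effective batch size of BATCH_SIZE * GRADIENT_ACCUMULATE_EVERY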
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f"\n\n {prime} \n\n {'-' * 80} \n")
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str + "\n\n")
| Mega-pytorch-main | train.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.fft import rfft, irfft
from einops import rearrange
from einops.layers.torch import Rearrange
from scipy.fftpack import next_fast_len
# functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def default(val, d):
return val if exists(val) else d
def append_dims(x, num_dims):
if num_dims <= 0:
return x
return x.view(*x.shape, *((1,) * num_dims))
def conv1d_fft(x, weights, dim = -2, weight_dim = -1):
# O(N log(N)) 1d convolution using some fourier trick
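# zero-pad both signals to a length with small prime factors, multiply in the frequency domain
# (conjugating the kernel), then roll and slice the inverse transform to keep the last N positions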
assert weight_dim >= dim
N = x.shape[dim]
M = weights.shape[weight_dim]
fast_len = next_fast_len(N + M - 1)
f_x = rfft(x, n = fast_len, dim = dim)
f_weight = rfft(weights, n = fast_len, dim = weight_dim)
f_v_weight = f_x * append_dims(f_weight.conj(), weight_dim - dim)
out = irfft(f_v_weight, fast_len, dim = dim)
out = out.roll(-1, dims = (dim,))
indices = torch.arange(start = fast_len - N, end = fast_len, dtype = torch.long, device = x.device)
out = out.index_select(dim, indices)
return out
# positional bias for single-headed attention
class T5RelativePositionBias(nn.Module):
def __init__(
self,
scale,
causal = False,
num_buckets = 32,
max_distance = 128
):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, 1)
@staticmethod
def _relative_position_bucket(
relative_position,
causal = True,
num_buckets = 32,
max_distance = 128
):
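# T5-style bucketing: nearby relative positions each get their own bucket, while larger distances
# share logarithmically spaced buckets up to max_distance; in the non-causal case half the buckets
# are reserved for positions in the opposite direction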
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, x):
i, j, device = *x.shape[-2:], x.device
q_pos = torch.arange(i, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j 1 -> i j')
return bias * self.scale
# classes
class LaplacianAttnFn(nn.Module):
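# Laplace attention function (as used in Mega): 0.5 * (1 + erf((x - mu) / (sigma * sqrt(2))))
# with mu = sqrt(1/2) and sigma^2 = 1 / (4 * pi), a smooth alternative to softmax normalization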
def forward(self, x):
mu = math.sqrt(0.5)
std = math.sqrt((4 * math.pi) ** -1)
return (1 + torch.special.erf((x - mu) / (std * math.sqrt(2)))) * 0.5
class OffsetScale(nn.Module):
def __init__(self, dim, heads = 1):
super().__init__()
self.gamma = nn.Parameter(torch.ones(heads, dim))
self.beta = nn.Parameter(torch.zeros(heads, dim))
nn.init.normal_(self.gamma, std = 0.02)
def forward(self, x):
out = einsum('... d, h d -> ... h d', x, self.gamma) + self.beta
return out.unbind(dim = -2)
class SingleHeadedAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_qk,
dim_value,
causal = False,
laplacian_attn_fn = False
):
super().__init__()
self.causal = causal
self.laplacian_attn_fn = laplacian_attn_fn
self.attn_fn = partial(F.softmax, dim = -1) if not laplacian_attn_fn else LaplacianAttnFn()
self.rel_pos_bias = T5RelativePositionBias(causal = causal, scale = dim_qk ** 0.5)
self.to_qk = nn.Sequential(
nn.Linear(dim, dim_qk),
nn.SiLU()
)
self.offsetscale = OffsetScale(dim_qk, heads = 2)
self.to_v = nn.Sequential(
nn.Linear(dim, dim_value),
nn.SiLU()
)
def forward(self, x, v_input = None):
seq_len, dim, device, dtype = *x.shape[-2:], x.device, x.dtype
v_input = default(v_input, x)
qk, v = self.to_qk(x), self.to_v(v_input)
q, k = self.offsetscale(qk)
scale = (seq_len ** -1) if self.laplacian_attn_fn else (dim ** -0.5)
sim = einsum('b i d, b j d -> b i j', q, k) * scale
sim = sim + self.rel_pos_bias(sim)
if self.causal:
causal_mask = torch.ones((seq_len, seq_len), device = device, dtype = torch.bool).triu(1)
if self.causal and not self.laplacian_attn_fn:
# is softmax attention and using large negative value pre-softmax
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = self.attn_fn(sim)
if self.causal and self.laplacian_attn_fn:
# if using laplacian attention function, zero out upper triangular with 0s
attn = attn.masked_fill(causal_mask, 0.)
return einsum('b i j, b j d -> b i d', attn, v)
class MultiHeadedEMA(nn.Module):
def __init__(
self,
*,
dim,
heads,
bidirectional = False,
norm_mhesa_heads = False
):
super().__init__()
self.bidirectional = bidirectional
self.expansion = nn.Parameter(torch.randn(heads * (2 if bidirectional else 1), dim))
self.reduction = nn.Parameter(torch.randn(heads * (2 if bidirectional else 1), dim))
# learned alpha and dampening factors
self.alphas = nn.Parameter(torch.randn(heads))
self.dampen_factors = nn.Parameter(torch.randn(heads))
if bidirectional:
self.reverse_alphas = nn.Parameter(torch.randn(heads))
self.reverse_dampen_factors = nn.Parameter(torch.randn(heads))
self.heads = heads
self.norm_heads = nn.Identity()
if norm_mhesa_heads:
# https://arxiv.org/abs/2210.06423 - retnet used sub-ln with some success as groupnorm
self.norm_heads = nn.Sequential(
Rearrange('b n h d -> b (h d) n'),
nn.GroupNorm(heads, dim * heads),
Rearrange('b (h d) n -> b n h d', h = heads)
)
def forward(self, x):
device, seq_len = x.device, x.shape[1]
# project in and split heads
x = einsum('... d, h d -> ... h d', x, self.expansion)
if self.bidirectional:
x, x_reversed = x.chunk(2, dim = -2)
x_reversed = torch.flip(x_reversed, dims = (1,))
# weights derived from alphas (learned exponential smoothing decay rate)
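# the kernel K[t] = alpha * ((1 - alpha) * delta)^t (t counting back from the current position),
# so the FFT convolution below realizes a damped exponential moving average per head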
def apply_learned_ema_with_damping(x, alphas, dampen_factors):
alphas = alphas.sigmoid()
dampen_factors = dampen_factors.sigmoid()
reversed_powers = torch.arange(seq_len - 1, -1, -1, device = device)
K = alphas * (((1 - alphas) * dampen_factors) ** rearrange(reversed_powers, '... l -> ... l 1'))
# conv1d fft O(nlog(n))
return conv1d_fft(x, K, dim = -3, weight_dim = -2)
x = apply_learned_ema_with_damping(x, self.alphas, self.dampen_factors)
if self.bidirectional:
x_reversed = apply_learned_ema_with_damping(x_reversed, self.reverse_alphas, self.reverse_dampen_factors)
x_reversed = torch.flip(x_reversed, dims = (1,))
x = torch.cat((x, x_reversed), dim = -2)
# maybe norm heads
x = self.norm_heads(x)
# combine heads and out
return einsum('... h d, h d -> ... d', x, self.reduction)
# Mega Layer
# Single headed Attention + Multi-headed EMA, then GRU-esque gating
class MegaLayer(nn.Module):
def __init__(
self,
*,
dim = 128,
ema_heads = 16,
attn_dim_qk = 64,
attn_dim_value = 256,
laplacian_attn_fn = False,
causal = True,
norm_mhesa_heads = False
):
super().__init__()
self.single_headed_attn = SingleHeadedAttention(
dim = dim,
dim_qk = attn_dim_qk,
dim_value = attn_dim_value,
causal = causal,
laplacian_attn_fn = laplacian_attn_fn
)
self.multi_headed_ema = MultiHeadedEMA(
dim = dim,
heads = ema_heads,
bidirectional = not causal,
norm_mhesa_heads = norm_mhesa_heads
)
self.to_reset_gate = nn.Sequential(
nn.Linear(dim, attn_dim_value),
nn.SiLU()
)
self.to_update_gate = nn.Sequential(
nn.Linear(dim, dim),
nn.Sigmoid()
)
# equation 14, for calculating H
self.Wh = nn.Parameter(torch.randn(dim, dim))
self.Uh = nn.Parameter(torch.randn(attn_dim_value, dim))
self.bh = nn.Parameter(torch.randn(dim))
def forward(self, x, residual = None):
residual = default(residual, x)
ema_output = self.multi_headed_ema(x)
attn_output = self.single_headed_attn(ema_output, x)
reset_gate = self.to_reset_gate(ema_output)
update_gate = self.to_update_gate(ema_output)
gated_attn_output = attn_output * reset_gate
# equation 14
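# i.e. H = SiLU(ema_output @ Wh + (attn_output * reset_gate) @ Uh + bh),
# which the update gate below blends with the residual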
H = F.silu(ema_output @ self.Wh + gated_attn_output @ self.Uh + self.bh)
# update gate
return update_gate * H + (1 - update_gate) * residual
# Mega
def FeedForward(dim, ff_mult):
dim_hidden = int(dim * ff_mult)
return nn.Sequential(
nn.Linear(dim, dim_hidden),
nn.GELU(),
nn.Linear(dim_hidden, dim)
)
class Mega(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
ff_mult = 2,
pre_norm = False,
**kwargs
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pre_norm = pre_norm
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
MegaLayer(dim = dim, **kwargs),
nn.LayerNorm(dim),
FeedForward(dim = dim, ff_mult = ff_mult),
nn.LayerNorm(dim)
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim) if pre_norm else nn.Identity(),
nn.Linear(dim, num_tokens)
)
def forward(self, x):
pre_norm = self.pre_norm
post_norm = not self.pre_norm
x = self.token_emb(x)
for mega_layer, mega_norm, ff, ff_norm in self.layers:
mega_maybe_prenorm = mega_norm if pre_norm else identity
ff_maybe_prenorm = ff_norm if pre_norm else identity
mega_maybe_postnorm = mega_norm if post_norm else identity
ff_maybe_postnorm = ff_norm if post_norm else identity
x = mega_layer(mega_maybe_prenorm(x), x)
x = mega_maybe_postnorm(x)
x = ff(ff_maybe_prenorm(x)) + x
x = ff_maybe_postnorm(x)
return self.to_logits(x)
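# minimal usage sketch (hyperparameter values below are illustrative, not taken from this repo)
if __name__ == '__main__':
    model = Mega(num_tokens = 256, dim = 128, depth = 2)
    tokens = torch.randint(0, 256, (1, 64))
    logits = model(tokens)
    print(logits.shape)  # expected: torch.Size([1, 64, 256])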
| Mega-pytorch-main | mega_pytorch/mega_pytorch.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
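# keep only the top (1 - thres) fraction of logits and set the rest to -inf,
# so that sampling is restricted to the most likely tokens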
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, temperature = 1., filter_thres = 0.9, **kwargs):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
logits = self.net(out, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
logits = self.net(x_inp, **kwargs)
return F.cross_entropy(rearrange(logits, 'b n c -> b c n'), x_labels)
| Mega-pytorch-main | mega_pytorch/autoregressive_wrapper.py |
from mega_pytorch.mega_pytorch import MegaLayer, Mega, MultiHeadedEMA
| Mega-pytorch-main | mega_pytorch/__init__.py |
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['deep_daze']
from version import __version__
setup(
name = 'deep-daze',
packages = find_packages(),
include_package_data = True,
entry_points={
'console_scripts': [
'imagine = deep_daze.cli:main',
],
},
version = __version__,
license='MIT',
description = 'Deep Daze',
author = 'Ryan Murdock, Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/deep-daze',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'implicit neural representations',
'text to image'
],
install_requires=[
'einops>=0.3',
'fire',
'ftfy',
'imageio>=2.9.0',
'siren-pytorch>=0.0.8',
'torch>=1.10',
'torch_optimizer',
'torchvision>=0.8.2',
'tqdm',
'regex'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| deep-daze-main | setup.py |
__version__ = '0.11.1'
| deep-daze-main | deep_daze/version.py |
from deep_daze.deep_daze import DeepDaze, Imagine
| deep-daze-main | deep_daze/__init__.py |
import sys
import fire
from deep_daze import Imagine
def train(
text=None,
img=None,
learning_rate=1e-5,
num_layers=16,
hidden_size=256,
batch_size=4,
gradient_accumulate_every=4,
epochs=20,
iterations=1050,
save_every=100,
image_width=512,
deeper=False,
overwrite=False,
save_progress=True,
seed=None,
open_folder=True,
save_date_time=False,
start_image_path=None,
start_image_train_iters=50,
theta_initial=None,
theta_hidden=None,
start_image_lr=3e-4,
lower_bound_cutout=0.1,
upper_bound_cutout=1.0,
saturate_bound=False,
create_story=False,
story_start_words=5,
story_words_per_epoch=5,
story_separator=None,
averaging_weight=0.3,
gauss_sampling=False,
gauss_mean=0.6,
gauss_std=0.2,
do_cutout=True,
center_bias=False,
center_focus=2,
jit=True,
save_gif=False,
save_video=False,
model_name="ViT-B/32",
optimizer="AdamP"
):
"""
:param text: (required) A phrase less than 77 tokens which you would like to visualize.
:param img: The path to a jpg or png image which you would like to imagine. Can be combined with text.
:param learning_rate: The learning rate of the neural net.
:param hidden_size: The hidden layer size of the Siren net.
:param num_layers: The number of hidden layers to use in the Siren neural net.
:param batch_size: The number of generated images to pass into Siren before calculating loss. Decreasing this can lower memory and accuracy.
:param gradient_accumulate_every: Calculate a weighted loss of n samples for each iteration. Increasing this can help increase accuracy with lower batch sizes.
:param epochs: The number of epochs to run.
:param iterations: The number of times to calculate and backpropagate loss in a given epoch.
:param save_progress: Whether or not to save images generated before training Siren is complete.
:param save_every: Generate an image every time iterations is a multiple of this number.
:param open_folder: Whether or not to open a folder showing your generated images.
:param overwrite: Whether or not to overwrite existing generated images of the same name.
:param deeper: Uses a Siren neural net with 32 hidden layers.
:param image_width: The desired resolution of the image.
:param seed: A seed to be used for deterministic runs.
:param save_date_time: Save files with a timestamp prepended e.g. `%y%m%d-%H%M%S-my_phrase_here.png`
:param start_image_path: Path to the image you would like to prime the generator with initially
:param start_image_train_iters: Number of iterations for priming, defaults to 50
:param theta_initial: Hyperparameter describing the frequency of the color space. Only applies to the first layer of the network.
:param theta_hidden: Hyperparameter describing the frequency of the color space. Only applies to the hidden layers of the network.
:param start_image_lr: Learning rate for the start image training.
:param upper_bound_cutout: The upper bound for the cutouts used in generation.
:param lower_bound_cutout: The lower bound for the cutouts used in generation.
:param saturate_bound: If True, the LOWER_BOUND_CUTOUT is linearly increased to 0.75 during training.
:param create_story: Creates a story by optimizing each epoch on a new sliding-window of the input words. If this is enabled, much longer texts than 77 tokens can be used. Requires save_progress to visualize the transitions of the story.
:param story_start_words: Only used if create_story is True. How many words to optimize on for the first epoch.
:param story_words_per_epoch: Only used if create_story is True. How many words to add to the optimization goal per epoch after the first one.
:param story_separator: Only used if create_story is True. Defines a separator like '.' that splits the text into groups for each epoch. Separator needs to be in the text otherwise it will be ignored!
:param averaging_weight: How much to weigh the averaged features of the random cutouts over the individual random cutouts. Increasing this value leads to more details being represented at the cost of some global coherence and a parcellation into smaller scenes.
:param gauss_sampling: Whether to use sampling from a Gaussian distribution instead of a uniform distribution.
:param gauss_mean: The mean of the Gaussian sampling distribution.
:param gauss_std: The standard deviation of the Gaussian sampling distribution.
:param do_cutout: Whether to use random cutouts as an augmentation. This basically needs to be turned on unless some new augmentations are added in code eventually.
:param center_bias: Whether to sample the locations of random cutouts from a Gaussian distribution centered on the image instead of a uniform distribution. Leads to the main generated objects being more focused in the center.
:param center_focus: How much to focus on the center if using center_bias. std = sampling_range / center_focus. High values lead to a very accurate representation in the center but washed-out colors and details towards the edges.
:param jit: Whether to use the jit-compiled CLIP model. The jit model is faster, but only compatible with torch version 1.7.1.
:param save_gif: Only used if save_progress is True. Saves a GIF animation of the generation procedure using the saved frames.
:param save_video: Only used if save_progress is True. Saves a MP4 animation of the generation procedure using the saved frames.
"""
# Don't instantiate imagine if the user just wants help.
if any("--help" in arg for arg in sys.argv):
print("Type `imagine --help` for usage info.")
sys.exit()
num_layers = 32 if deeper else num_layers
imagine = Imagine(
text=text,
img=img,
lr=learning_rate,
num_layers=num_layers,
batch_size=batch_size,
gradient_accumulate_every=gradient_accumulate_every,
epochs=epochs,
iterations=iterations,
image_width=image_width,
save_every=save_every,
save_progress=save_progress,
seed=seed,
open_folder=open_folder,
save_date_time=save_date_time,
start_image_path=start_image_path,
start_image_train_iters=start_image_train_iters,
theta_initial=theta_initial,
theta_hidden=theta_hidden,
start_image_lr=start_image_lr,
lower_bound_cutout=lower_bound_cutout,
upper_bound_cutout=upper_bound_cutout,
saturate_bound=saturate_bound,
create_story=create_story,
story_start_words=story_start_words,
story_words_per_epoch=story_words_per_epoch,
story_separator=story_separator,
averaging_weight=averaging_weight,
gauss_sampling=gauss_sampling,
gauss_mean=gauss_mean,
gauss_std=gauss_std,
do_cutout=do_cutout,
center_bias=center_bias,
center_focus=center_focus,
jit=jit,
hidden_size=hidden_size,
model_name=model_name,
optimizer=optimizer,
save_gif=save_gif,
save_video=save_video,
)
print('Starting up...')
if not overwrite and imagine.filename.exists():
answer = input('Imagined image already exists, do you want to overwrite? (y/n) ').lower()
if answer not in ('yes', 'y'):
sys.exit()
imagine()
def main():
fire.Fire(train)
| deep-daze-main | deep_daze/cli.py |
import os
import subprocess
import sys
import random
from datetime import datetime
from pathlib import Path
import torch
import torch.nn.functional as F
from siren_pytorch import SirenNet, SirenWrapper
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torch_optimizer import DiffGrad, AdamP
import numpy as np
from PIL import Image
from imageio import imread, mimsave
import torchvision.transforms as T
from tqdm import trange, tqdm
from .clip import load, tokenize
# Helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def interpolate(image, size):
return F.interpolate(image, (size, size), mode='bilinear', align_corners=False)
def rand_cutout(image, size, center_bias=False, center_focus=2):
width = image.shape[-1]
min_offset = 0
max_offset = width - size
if center_bias:
# sample around image center
center = max_offset / 2
std = center / center_focus
offset_x = int(random.gauss(mu=center, sigma=std))
offset_y = int(random.gauss(mu=center, sigma=std))
# resample uniformly if over boundaries
offset_x = random.randint(min_offset, max_offset) if (offset_x > max_offset or offset_x < min_offset) else offset_x
offset_y = random.randint(min_offset, max_offset) if (offset_y > max_offset or offset_y < min_offset) else offset_y
else:
offset_x = random.randint(min_offset, max_offset)
offset_y = random.randint(min_offset, max_offset)
cutout = image[:, :, offset_x:offset_x + size, offset_y:offset_y + size]
return cutout
def create_clip_img_transform(image_width):
clip_mean = [0.48145466, 0.4578275, 0.40821073]
clip_std = [0.26862954, 0.26130258, 0.27577711]
transform = T.Compose([
#T.ToPILImage(),
T.Resize(image_width),
T.CenterCrop((image_width, image_width)),
T.ToTensor(),
T.Normalize(mean=clip_mean, std=clip_std)
])
return transform
def open_folder(path):
if os.path.isfile(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
return
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', path]
elif sys.platform == 'linux2' or sys.platform == 'linux':
cmd_list = ['xdg-open', path]
elif sys.platform in ['win32', 'win64']:
cmd_list = ['explorer', path.replace('/', '\\')]
if cmd_list is None:
return
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass
except OSError:
pass
def norm_siren_output(img):
return ((img + 1) * 0.5).clamp(0.0, 1.0)
def create_text_path(context_length, text=None, img=None, encoding=None, separator=None):
if text is not None:
if separator is not None and separator in text:
#Reduces filename to first epoch text
text = text[:text.index(separator)]
input_name = text.replace(" ", "_")[:context_length]
elif img is not None:
if isinstance(img, str):
input_name = "".join(img.replace(" ", "_").split(".")[:-1])
else:
input_name = "PIL_img"
else:
input_name = "your_encoding"
return input_name
class DeepDaze(nn.Module):
def __init__(
self,
clip_perceptor,
clip_norm,
input_res,
total_batches,
batch_size,
num_layers=8,
image_width=512,
loss_coef=100,
theta_initial=None,
theta_hidden=None,
lower_bound_cutout=0.1, # should be smaller than 0.8
upper_bound_cutout=1.0,
saturate_bound=False,
gauss_sampling=False,
gauss_mean=0.6,
gauss_std=0.2,
do_cutout=True,
center_bias=False,
center_focus=2,
hidden_size=256,
averaging_weight=0.3,
):
super().__init__()
# load clip
self.perceptor = clip_perceptor
self.input_resolution = input_res
self.normalize_image = clip_norm
self.loss_coef = loss_coef
self.image_width = image_width
self.batch_size = batch_size
self.total_batches = total_batches
self.num_batches_processed = 0
w0 = default(theta_hidden, 30.)
w0_initial = default(theta_initial, 30.)
siren = SirenNet(
dim_in=2,
dim_hidden=hidden_size,
num_layers=num_layers,
dim_out=3,
use_bias=True,
w0=w0,
w0_initial=w0_initial
)
self.model = SirenWrapper(
siren,
image_width=image_width,
image_height=image_width
)
self.saturate_bound = saturate_bound
self.saturate_limit = 0.75 # cutouts above this value lead to destabilization
self.lower_bound_cutout = lower_bound_cutout
self.upper_bound_cutout = upper_bound_cutout
self.gauss_sampling = gauss_sampling
self.gauss_mean = gauss_mean
self.gauss_std = gauss_std
self.do_cutout = do_cutout
self.center_bias = center_bias
self.center_focus = center_focus
self.averaging_weight = averaging_weight
def sample_sizes(self, lower, upper, width, gauss_mean):
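# cutout sizes are drawn as fractions of the image width: either uniformly in [lower, upper],
# or from a Gaussian whose out-of-bounds samples fall back to the uniform distribution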
if self.gauss_sampling:
gauss_samples = torch.zeros(self.batch_size).normal_(mean=gauss_mean, std=self.gauss_std)
outside_bounds_mask = (gauss_samples > upper) | (gauss_samples < lower)
gauss_samples[outside_bounds_mask] = torch.zeros((len(gauss_samples[outside_bounds_mask]),)).uniform_(lower, upper)
sizes = (gauss_samples * width).int()
else:
lower *= width
upper *= width
sizes = torch.randint(int(lower), int(upper), (self.batch_size,))
return sizes
def forward(self, text_embed, return_loss=True, dry_run=False):
out = self.model()
out = norm_siren_output(out)
if not return_loss:
return out
# determine upper and lower sampling bound
width = out.shape[-1]
lower_bound = self.lower_bound_cutout
if self.saturate_bound:
progress_fraction = self.num_batches_processed / self.total_batches
lower_bound += (self.saturate_limit - self.lower_bound_cutout) * progress_fraction
# sample cutout sizes between lower and upper bound
sizes = self.sample_sizes(lower_bound, self.upper_bound_cutout, width, self.gauss_mean)
# create normalized random cutouts
if self.do_cutout:
image_pieces = [rand_cutout(out, size, center_bias=self.center_bias, center_focus=self.center_focus) for size in sizes]
image_pieces = [interpolate(piece, self.input_resolution) for piece in image_pieces]
else:
image_pieces = [interpolate(out.clone(), self.input_resolution) for _ in sizes]
# normalize
image_pieces = torch.cat([self.normalize_image(piece) for piece in image_pieces])
# calc image embedding
with autocast(enabled=False):
image_embed = self.perceptor.encode_image(image_pieces)
# calc loss
# loss over averaged features of cutouts
avg_image_embed = image_embed.mean(dim=0).unsqueeze(0)
averaged_loss = -self.loss_coef * torch.cosine_similarity(text_embed, avg_image_embed, dim=-1).mean()
# loss over all cutouts
general_loss = -self.loss_coef * torch.cosine_similarity(text_embed, image_embed, dim=-1).mean()
# merge losses
loss = averaged_loss * (self.averaging_weight) + general_loss * (1 - self.averaging_weight)
# count batches
if not dry_run:
self.num_batches_processed += self.batch_size
return out, loss
class Imagine(nn.Module):
def __init__(
self,
*,
text=None,
img=None,
clip_encoding=None,
lr=1e-5,
batch_size=4,
gradient_accumulate_every=4,
save_every=100,
image_width=512,
num_layers=16,
epochs=20,
iterations=1050,
save_progress=True,
seed=None,
open_folder=True,
save_date_time=False,
start_image_path=None,
start_image_train_iters=10,
start_image_lr=3e-4,
theta_initial=None,
theta_hidden=None,
model_name="ViT-B/32",
lower_bound_cutout=0.1, # should be smaller than 0.8
upper_bound_cutout=1.0,
saturate_bound=False,
averaging_weight=0.3,
create_story=False,
story_start_words=5,
story_words_per_epoch=5,
story_separator=None,
gauss_sampling=False,
gauss_mean=0.6,
gauss_std=0.2,
do_cutout=True,
center_bias=False,
center_focus=2,
optimizer="AdamP",
jit=True,
hidden_size=256,
save_gif=False,
save_video=False,
):
super().__init__()
if exists(seed):
tqdm.write(f'setting seed: {seed}')
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
# fields for story creation:
self.create_story = create_story
self.words = None
self.separator = str(story_separator) if story_separator is not None else None
if self.separator is not None and text is not None:
#exit if text is just the separator
if str(text).replace(' ','').replace(self.separator,'') == '':
print('Exiting because the text only consists of the separator! Needs words or phrases that are separated by the separator.')
exit()
#adds a space to each separator and removes double spaces that might be generated
text = text.replace(self.separator, self.separator + ' ').replace('  ', ' ').strip()
self.all_words = text.split(" ") if text is not None else None
self.num_start_words = story_start_words
self.words_per_epoch = story_words_per_epoch
if create_story:
assert text is not None, "We need text input to create a story..."
# overwrite epochs to match story length
num_words = len(self.all_words)
self.epochs = 1 + (num_words - self.num_start_words) / self.words_per_epoch
# add one epoch if not divisible
self.epochs = int(self.epochs) if int(self.epochs) == self.epochs else int(self.epochs) + 1
if self.separator is not None:
if self.separator not in text:
print("Separator '"+self.separator+"' will be ignored since not in text!")
self.separator = None
else:
self.epochs = len(list(filter(None,text.split(self.separator))))
print("Running for", self.epochs, "epochs" + (" (split with '"+self.separator+"' as the separator)" if self.separator is not None else ""))
else:
self.epochs = epochs
# jit models only compatible with version 1.7.1
if "1.7.1" not in torch.__version__:
if jit == True:
print("Setting jit to False because torch version is not 1.7.1.")
jit = False
# Load CLIP
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
clip_perceptor, norm = load(model_name, jit=jit, device=self.device)
self.perceptor = clip_perceptor.eval()
for param in self.perceptor.parameters():
param.requires_grad = False
if jit == False:
input_res = clip_perceptor.visual.input_resolution
else:
input_res = clip_perceptor.input_resolution.item()
self.clip_transform = create_clip_img_transform(input_res)
self.iterations = iterations
self.image_width = image_width
total_batches = self.epochs * self.iterations * batch_size * gradient_accumulate_every
model = DeepDaze(
self.perceptor,
norm,
input_res,
total_batches,
batch_size=batch_size,
image_width=image_width,
num_layers=num_layers,
theta_initial=theta_initial,
theta_hidden=theta_hidden,
lower_bound_cutout=lower_bound_cutout,
upper_bound_cutout=upper_bound_cutout,
saturate_bound=saturate_bound,
gauss_sampling=gauss_sampling,
gauss_mean=gauss_mean,
gauss_std=gauss_std,
do_cutout=do_cutout,
center_bias=center_bias,
center_focus=center_focus,
hidden_size=hidden_size,
averaging_weight=averaging_weight,
).to(self.device)
self.model = model
self.scaler = GradScaler()
siren_params = model.model.parameters()
if optimizer == "AdamP":
self.optimizer = AdamP(siren_params, lr)
elif optimizer == "Adam":
self.optimizer = torch.optim.Adam(siren_params, lr)
elif optimizer == "DiffGrad":
self.optimizer = DiffGrad(siren_params, lr)
self.gradient_accumulate_every = gradient_accumulate_every
self.save_every = save_every
self.save_date_time = save_date_time
self.open_folder = open_folder
self.save_progress = save_progress
self.text = text
self.image = img
self.textpath = create_text_path(self.perceptor.context_length, text=text, img=img, encoding=clip_encoding, separator=story_separator)
self.filename = self.image_output_path()
# create coding to optimize for
self.clip_encoding = self.create_clip_encoding(text=text, img=img, encoding=clip_encoding)
self.start_image = None
self.start_image_train_iters = start_image_train_iters
self.start_image_lr = start_image_lr
if exists(start_image_path):
file = Path(start_image_path)
assert file.exists(), f'file does not exist at given starting image path {start_image_path}'
image = Image.open(str(file))
start_img_transform = T.Compose([T.Resize(image_width),
T.CenterCrop((image_width, image_width)),
T.ToTensor()])
image_tensor = start_img_transform(image).unsqueeze(0).to(self.device)
self.start_image = image_tensor
self.save_gif = save_gif
self.save_video = save_video
def create_clip_encoding(self, text=None, img=None, encoding=None):
self.text = text
self.img = img
if encoding is not None:
encoding = encoding.to(self.device)
elif self.create_story:
encoding = self.update_story_encoding(epoch=0, iteration=1)
elif text is not None and img is not None:
encoding = (self.create_text_encoding(text) + self.create_img_encoding(img)) / 2
elif text is not None:
encoding = self.create_text_encoding(text)
elif img is not None:
encoding = self.create_img_encoding(img)
return encoding
def create_text_encoding(self, text):
tokenized_text = tokenize(text).to(self.device)
with torch.no_grad():
text_encoding = self.perceptor.encode_text(tokenized_text).detach()
return text_encoding
def create_img_encoding(self, img):
if isinstance(img, str):
img = Image.open(img)
normed_img = self.clip_transform(img).unsqueeze(0).to(self.device)
with torch.no_grad():
img_encoding = self.perceptor.encode_image(normed_img).detach()
return img_encoding
def set_clip_encoding(self, text=None, img=None, encoding=None):
encoding = self.create_clip_encoding(text=text, img=img, encoding=encoding)
self.clip_encoding = encoding.to(self.device)
def index_of_first_separator(self) -> int:
for c, word in enumerate(self.all_words):
if self.separator in str(word):
return c + 1
def update_story_encoding(self, epoch, iteration):
if self.separator is not None:
self.words = " ".join(self.all_words[:self.index_of_first_separator()])
#removes separator from epoch-text
self.words = self.words.replace(self.separator,'')
self.all_words = self.all_words[self.index_of_first_separator():]
else:
if self.words is None:
self.words = " ".join(self.all_words[:self.num_start_words])
self.all_words = self.all_words[self.num_start_words:]
else:
# add words_per_epoch new words
count = 0
while count < self.words_per_epoch and len(self.all_words) > 0:
new_word = self.all_words[0]
self.words = " ".join(self.words.split(" ") + [new_word])
self.all_words = self.all_words[1:]
count += 1
# remove words until it fits in context length
while len(self.words) > self.perceptor.context_length:
# remove first word
self.words = " ".join(self.words.split(" ")[1:])
# get new encoding
print("Now thinking of: ", '"', self.words, '"')
sequence_number = self.get_img_sequence_number(epoch, iteration)
# save new words to disc
with open("story_transitions.txt", "a") as f:
f.write(f"{epoch}, {sequence_number}, {self.words}\n")
encoding = self.create_text_encoding(self.words)
return encoding
def image_output_path(self, sequence_number=None):
"""
Returns underscore separated Path.
A current timestamp is prepended if `self.save_date_time` is set.
Sequence number left padded with 6 zeroes is appended if `save_every` is set.
:rtype: Path
"""
output_path = self.textpath
if sequence_number:
sequence_number_left_padded = str(sequence_number).zfill(6)
output_path = f"{output_path}.{sequence_number_left_padded}"
if self.save_date_time:
current_time = datetime.now().strftime("%y%m%d-%H%M%S_%f")
output_path = f"{current_time}_{output_path}"
return Path(f"{output_path}.jpg")
def train_step(self, epoch, iteration):
total_loss = 0
for _ in range(self.gradient_accumulate_every):
with autocast(enabled=True):
out, loss = self.model(self.clip_encoding)
loss = loss / self.gradient_accumulate_every
total_loss += loss
self.scaler.scale(loss).backward()
out = out.cpu().float().clamp(0., 1.)
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
if (iteration % self.save_every == 0) and self.save_progress:
self.save_image(epoch, iteration, img=out)
return out, total_loss
def get_img_sequence_number(self, epoch, iteration):
current_total_iterations = epoch * self.iterations + iteration
sequence_number = current_total_iterations // self.save_every
return sequence_number
@torch.no_grad()
def save_image(self, epoch, iteration, img=None):
sequence_number = self.get_img_sequence_number(epoch, iteration)
if img is None:
img = self.model(self.clip_encoding, return_loss=False).cpu().float().clamp(0., 1.)
self.filename = self.image_output_path(sequence_number=sequence_number)
pil_img = T.ToPILImage()(img.squeeze())
pil_img.save(self.filename, quality=95, subsampling=0)
pil_img.save(f"{self.textpath}.jpg", quality=95, subsampling=0)
tqdm.write(f'image updated at "./{str(self.filename)}"')
def generate_gif(self):
images = []
for file_name in sorted(os.listdir('./')):
if file_name.startswith(self.textpath) and file_name != f'{self.textpath}.jpg':
images.append(imread(os.path.join('./', file_name)))
if self.save_video:
mimsave(f'{self.textpath}.mp4', images)
print(f'Generated image generation animation at ./{self.textpath}.mp4')
if self.save_gif:
mimsave(f'{self.textpath}.gif', images)
print(f'Generated image generation animation at ./{self.textpath}.gif')
def forward(self):
if exists(self.start_image):
tqdm.write('Preparing with initial image...')
optim = DiffGrad(self.model.model.parameters(), lr = self.start_image_lr)
pbar = trange(self.start_image_train_iters, desc='iteration')
try:
for _ in pbar:
loss = self.model.model(self.start_image)
loss.backward()
pbar.set_description(f'loss: {loss.item():.2f}')
optim.step()
optim.zero_grad()
except KeyboardInterrupt:
print('interrupted by keyboard, gracefully exiting')
return exit()
del self.start_image
del optim
tqdm.write(f'Imagining "{self.textpath}" from the depths of my weights...')
with torch.no_grad():
self.model(self.clip_encoding, dry_run=True) # do one warmup step due to potential issue with CLIP and CUDA
if self.open_folder:
open_folder('./')
self.open_folder = False
try:
for epoch in trange(self.epochs, desc='epochs'):
pbar = trange(self.iterations, desc='iteration')
for i in pbar:
_, loss = self.train_step(epoch, i)
pbar.set_description(f'loss: {loss.item():.2f}')
# Update clip_encoding per epoch if we are creating a story
if self.create_story:
self.clip_encoding = self.update_story_encoding(epoch, i)
except KeyboardInterrupt:
print('interrupted by keyboard, gracefully exiting')
return
self.save_image(epoch, i) # one final save at end
if (self.save_gif or self.save_video) and self.save_progress:
self.generate_gif()
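# minimal usage sketch (text and settings are illustrative; assumes the package's dependencies,
# the CLIP weights download and, ideally, a CUDA-capable GPU are available)
if __name__ == '__main__':
    imagine = Imagine(
        text = 'a starry night over a quiet harbor',
        num_layers = 16,
        epochs = 1,
        iterations = 100,
        save_every = 50,
        open_folder = False
    )
    imagine()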
| deep-daze-main | deep_daze/deep_daze.py |
from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from pathlib import Path
import hashlib
import os
import urllib
import warnings
from typing import Union, List
from torchvision.transforms import Compose, Normalize
from tqdm import tqdm
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(
total=int(source.info().get("Content-Length")),
unit='iB',
unit_scale=True,
desc=f"Downloading {filename}",
) as loop:
while True:
buffer = source.read(524288)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform():
return Compose([
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[torch.Tensor], torch.Tensor]
A torchvision transform that normalizes an image tensor for the returned model (this stripped-down version only applies CLIP's Normalize; resizing and tensor conversion are handled by the caller)
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform()
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
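# e.g. tokenize(["a diagram", "a photo of a dog"]) -> LongTensor of shape (2, 77)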
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
import html
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    This also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
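# quick sanity check of the mapping: printable bytes map to themselves ('!' -> '!'),
# while excluded bytes are shifted past 255, e.g. the space byte 0x20 becomes chr(256 + 32) = 'Ġ',
# giving a reversible, whitespace-free alphabet for the merges file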
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
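# example of the symbol-pair extraction:
#   get_pairs(('h', 'e', 'l', 'l', 'o</w>')) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}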
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
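# round-trip sketch (token ids depend on the bundled vocab file, so none are hard-coded here):
#   ids = SimpleTokenizer().encode("Hello world!")   # lowercased, cleaned, then bpe-encoded
#   SimpleTokenizer().decode(ids)                    # -> roughly "hello world ! ", with '</w>' rendered as spaces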
_tokenizer = SimpleTokenizer()
| deep-daze-main | deep_daze/clip.py |
from setuptools import setup, find_packages
setup(
name = 'reformer_pytorch',
packages = find_packages(exclude=['examples', 'pretraining']),
version = '1.4.4',
license='MIT',
description = 'Reformer, the Efficient Transformer, Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/reformer-pytorch',
keywords = ['transformers', 'attention', 'artificial intelligence'],
install_requires=[
'axial-positional-embedding>=0.1.0',
'einops',
'local-attention',
'product-key-memory',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| reformer-pytorch-master | setup.py |
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.autopadder import Autopadder
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class TrainingWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
assert isinstance(net, ReformerLM), 'generative trainer wrapper can only accept ReformerLM class'
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = Autopadder(net)
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('input_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, input_mask=input_mask, **kwargs)[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
input_mask = F.pad(input_mask, (0, 1), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, return_loss = False, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
if not return_loss:
if not isinstance(x, torch.Tensor):
x = pad(x)
return self.net(x, **kwargs)
if isinstance(x, torch.Tensor):
xi = x[:, :-1]
xo = x[:, 1:]
else:
xi = pad(list(map(lambda t: t[:-1], x)))
xo = pad(list(map(lambda t: t[1:], x)))
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
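# minimal usage sketch (hyperparameters are arbitrary):
#   model = TrainingWrapper(ReformerLM(num_tokens = 256, dim = 512, depth = 1, max_seq_len = 1024, causal = True))
#   x = torch.randint(0, 256, (1, 1024))
#   loss = model(x, return_loss = True)      # teacher-forced next-token loss
#   loss.backward()
#   sample = model.generate(x[:, :16], 64)   # extend a 16-token prime by 64 tokens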
| reformer-pytorch-master | reformer_pytorch/generative_tools.py |
import math
import torch
from torch import nn
import torch.nn.functional as F
from reformer_pytorch.reformer_pytorch import Reformer, ReformerLM, LSHSelfAttention
def pad_to_multiple(tensor, seqlen, multiple, dim=-1):
m = seqlen / multiple
if m.is_integer():
return tensor
remainder = math.ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value=0)
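# e.g. (assuming no memory key/values or context keys) a tensor of sequence length 100
# with bucket_size 64 is right-padded with zeros to length 128, the next multiple of
# bucket_size * 2, before attention is applied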
class Autopadder(nn.Module):
def __init__(self, net):
super().__init__()
assert isinstance(net, (LSHSelfAttention, Reformer, ReformerLM)), 'only modules LSHSelfAttention, Reformer, ReformerLM accepted'
self.net = net
reformer = net.reformer if isinstance(net, ReformerLM) else net
self.pad_dim = -1 if isinstance(net, ReformerLM) else -2
self.bucket_size = reformer.bucket_size
self.num_mem_kv = reformer.num_mem_kv
self.full_attn_thres = reformer.full_attn_thres
def forward(self, x, **kwargs):
b, t, m, device = *x.shape[:2], self.num_mem_kv, x.device
keys = kwargs.get('keys')
input_mask = kwargs.get('input_mask')
input_attn_mask = kwargs.get('input_attn_mask')
k_len = 0 if keys is None else keys.shape[1]
seqlen = t + m + k_len
if seqlen > self.full_attn_thres:
if input_mask is None:
input_mask = torch.full((b, t), True, device=x.device, dtype=torch.bool)
x = pad_to_multiple(x, seqlen, self.bucket_size * 2, dim=self.pad_dim)
if input_mask is not None:
new_mask = F.pad(input_mask, (0, x.shape[1] - input_mask.shape[1]), value=False)
kwargs.update(input_mask=new_mask)
if input_attn_mask is not None:
offset = x.shape[1] - input_attn_mask.shape[1]
new_mask = F.pad(input_attn_mask, (0, offset, 0, offset), value=False)
kwargs.update(input_attn_mask=new_mask)
out = self.net(x, **kwargs)
return out[:, 0:t]
| reformer-pytorch-master | reformer_pytorch/autopadder.py |
import re
from torch import nn
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return bool(re.match(f'^{prefix}', str))
def group_by_key_prefix(prefix, d):
return group_dict_by_key(lambda x: string_begins_with(prefix, x), d)
def group_by_key_prefix_and_remove_prefix(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(lambda x: string_begins_with(prefix, x), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
def extract_enc_dec_kwargs(kwargs):
enc_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(ENC_PREFIX, kwargs)
dec_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(DEC_PREFIX, kwargs)
return enc_kwargs, dec_kwargs, kwargs
def extract_and_set_enc_dec_kwargs(kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_enc_dec_kwargs(kwargs)
if 'input_mask' in enc_kwargs:
dec_kwargs.setdefault('context_mask', enc_kwargs['input_mask'])
return enc_kwargs, dec_kwargs, kwargs
class ReformerEncDec(nn.Module):
def __init__(self, dim, ignore_index = 0, pad_value = 0, **kwargs):
super().__init__()
enc_kwargs, dec_kwargs, _ = extract_enc_dec_kwargs(kwargs)
assert 'return_embedding' not in enc_kwargs, 'you cannot manually set the return embeddings flag for the encoder'
assert 'dim' not in dec_kwargs and 'dim' not in enc_kwargs, 'you must set the dim for both encoder and decoder'
enc_kwargs['dim'] = dec_kwargs['dim'] = dim
enc_kwargs['return_embeddings'] = True
dec_kwargs['causal'] = True
enc_kwargs.setdefault('bucket_size', 64)
dec_kwargs.setdefault('bucket_size', enc_kwargs['bucket_size'] * 2)
enc = ReformerLM(**enc_kwargs)
dec = ReformerLM(**dec_kwargs)
self.enc = TrainingWrapper(enc, ignore_index = ignore_index, pad_value = pad_value)
self.dec = TrainingWrapper(dec, ignore_index = ignore_index, pad_value = pad_value)
def generate(self, seq_in, seq_out_start, seq_len, **kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
enc_keys = self.enc(seq_in, **enc_kwargs)
return self.dec.generate(seq_out_start, seq_len, keys = enc_keys, **{**dec_kwargs, **kwargs})
def forward(self, seq_in, seq_out, return_loss = False, **kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
enc_keys = self.enc(seq_in, **enc_kwargs)
return self.dec(seq_out, return_loss = return_loss, keys = enc_keys, **dec_kwargs)
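# minimal usage sketch (token counts, depths and lengths are arbitrary):
#   enc_dec = ReformerEncDec(
#       dim = 512,
#       enc_num_tokens = 256, enc_depth = 1, enc_max_seq_len = 256,
#       dec_num_tokens = 256, dec_depth = 1, dec_max_seq_len = 256
#   )
#   seq_in  = torch.randint(0, 256, (1, 256))
#   seq_out = torch.randint(0, 256, (1, 256))
#   loss = enc_dec(seq_in, seq_out, return_loss = True)
#   generated = enc_dec.generate(seq_in, seq_out[:, :1], seq_len = 256)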
| reformer-pytorch-master | reformer_pytorch/reformer_enc_dec.py |
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g, depth=None, send_signal = False):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.depth = depth
self.send_signal = send_signal
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = False
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = True
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class IrreversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = f
self.g = g
def forward(self, x, f_args, g_args):
x1, x2 = torch.chunk(x, 2, dim=2)
y1 = x1 + self.f(x2, **f_args)
y2 = x2 + self.g(y1, **g_args)
return torch.cat([y1, y2], dim=2)
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class ReversibleSequence(nn.Module):
def __init__(self, blocks, layer_dropout = 0., reverse_thres = 0, send_signal = False):
super().__init__()
self.layer_dropout = layer_dropout
self.reverse_thres = reverse_thres
self.blocks = nn.ModuleList([ReversibleBlock(f, g, depth, send_signal) for depth, (f, g) in enumerate(blocks)])
self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, arg_route = (True, False), **kwargs):
reverse = x.shape[1] > self.reverse_thres
blocks = self.blocks if reverse else self.irrev_blocks
if self.training and self.layer_dropout > 0:
to_drop = torch.empty(len(self.blocks)).uniform_(0, 1) < self.layer_dropout
blocks = [block for block, drop in zip(self.blocks, to_drop) if not drop]
blocks = self.blocks[:1] if len(blocks) == 0 else blocks
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {'f_args': f_args, 'g_args': g_args}
if not reverse:
for block in blocks:
x = block(x, **block_kwargs)
return x
return _ReversibleFunction.apply(x, blocks, block_kwargs)
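# usage sketch: each entry in `blocks` is an (f, g) pair of modules mapping
# (batch, seq, dim) -> (batch, seq, dim); the sequence expects its input already
# duplicated along the feature dimension (see Reformer.forward, which does torch.cat([x, x], dim = -1))
# so that it can be split into the two reversible streams x1 and x2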
| reformer-pytorch-master | reformer_pytorch/reversible.py |
from torch import nn
from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention
from collections import defaultdict
class Recorder(nn.Module):
def __init__(self, net):
super().__init__()
self.iter = 0
self.recordings = defaultdict(list)
self.net = net
self.on = True
self.ejected = False
def eject(self):
self.ejected = True
self.clear()
self.unwire()
return self.net
def wire(self):
for module in self.net.modules():
if isinstance(module, LSHAttention):
module._return_attn = True
if isinstance(module, LSHSelfAttention):
module.callback = self.record
def unwire(self):
for module in self.net.modules():
if isinstance(module, LSHAttention):
module._return_attn = False
if isinstance(module, LSHSelfAttention):
module.callback = None
def turn_on(self):
self.on = True
def turn_off(self):
self.on = False
def clear(self):
del self.recordings
self.recordings = defaultdict(list)
self.iter = 0
def record(self, attn, buckets):
if not self.on: return
data = {'attn': attn.detach().cpu(), 'buckets': buckets.detach().cpu()}
self.recordings[self.iter].append(data)
def forward(self, x, **kwargs):
assert not self.ejected, 'Recorder has already been ejected and disposed'
if self.on:
self.wire()
out = self.net(x, **kwargs)
self.iter += 1
self.unwire()
return out
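# minimal usage sketch: wrap a model to capture attention weights and bucket assignments
#   model = Recorder(ReformerLM(num_tokens = 256, dim = 512, depth = 1, max_seq_len = 1024, causal = True))
#   model(torch.randint(0, 256, (1, 1024)))
#   model.recordings[0]     # list of {'attn': ..., 'buckets': ...} dicts, one per LSH self-attention layer
#   model = model.eject()   # detach the recorder and recover the wrapped module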
| reformer-pytorch-master | reformer_pytorch/recorder.py |
from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention, Reformer, ReformerLM
from reformer_pytorch.reformer_enc_dec import ReformerEncDec
from reformer_pytorch.recorder import Recorder
from reformer_pytorch.autopadder import Autopadder
| reformer-pytorch-master | reformer_pytorch/__init__.py |
import math
import torch
import torch.nn as nn
from torch.nn import Identity
import torch.nn.functional as F
from torch.autograd import Function
from functools import partial, reduce, wraps
from itertools import chain
from operator import mul
from local_attention import LocalAttention
from axial_positional_embedding import AxialPositionalEmbedding
from product_key_memory import PKM
from reformer_pytorch.reversible import ReversibleSequence
from einops import rearrange, repeat
# constants
TOKEN_SELF_ATTN_VALUE = -5e4 # carefully set for half precision to work
# helper fns
def exists(val):
return val is not None
def sort_key_val(t1, t2, dim=-1):
values, indices = t1.sort(dim=dim)
t2 = t2.expand_as(t1)
return values, t2.gather(dim, indices)
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
def process_inputs_chunk(fn, chunks=1, dim=0):
def inner_fn(*args, **kwargs):
keys, values, len_args = kwargs.keys(), kwargs.values(), len(args)
chunked_args = list(zip(*map(lambda x: x.chunk(chunks, dim=dim), list(args) + list(values))))
all_args = map(lambda x: (x[:len_args], dict(zip(keys, x[len_args:]))), chunked_args)
outputs = [fn(*c_args, **c_kwargs) for c_args, c_kwargs in all_args]
return tuple(map(lambda x: torch.cat(x, dim=dim), zip(*outputs)))
return inner_fn
def chunked_sum(tensor, chunks=1):
*orig_size, last_dim = tensor.shape
tensor = tensor.reshape(-1, last_dim)
summed_tensors = [c.sum(dim=-1) for c in tensor.chunk(chunks, dim=0)]
return torch.cat(summed_tensors, dim=0).reshape(orig_size)
def default(val, default_val):
return default_val if val is None else val
def cast_tuple(x):
return x if isinstance(x, tuple) else (x,)
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def cache_fn(f):
cache = None
@wraps(f)
def cached_fn(*args, **kwargs):
nonlocal cache
if cache is not None:
return cache
cache = f(*args, **kwargs)
return cache
return cached_fn
def cache_method_decorator(cache_attr, cache_namespace, reexecute = False):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, key_namespace=None, fetch=False, set_cache=True, **kwargs):
namespace_str = str(default(key_namespace, ''))
_cache = getattr(self, cache_attr)
_keyname = f'{cache_namespace}:{namespace_str}'
if fetch:
val = _cache[_keyname]
if reexecute:
fn(self, *args, **kwargs)
else:
val = fn(self, *args, **kwargs)
if set_cache:
setattr(self, cache_attr, {**_cache, **{_keyname: val}})
return val
return wrapper
return inner_fn
def expand_dim(dim, k, t):
t = t.unsqueeze(dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
# helper classes
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, *args, **kwargs):
return self.val
class MatrixMultiply(nn.Module):
def __init__(self, tensor, transpose = False, normalize = False):
super().__init__()
self.tensor = tensor
self.transpose = transpose
self.normalize = normalize
def forward(self, x):
tensor = self.tensor
if self.normalize:
tensor = F.normalize(tensor, dim=-1)
if self.transpose:
tensor = tensor.t()
return x @ tensor
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.zeros(1))
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.g
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-5):
super().__init__()
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
return x / n * self.g
class PreNorm(nn.Module):
def __init__(self, norm_class, dim, fn):
super().__init__()
self.norm = norm_class(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim = -1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x, **kwargs):
if self.chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
# LSH attention as described in https://openreview.net/pdf?id=rkgNKkHtvB
# adapted from trax, stripped to what paper said needed to work
# namely that buckets need to be at least 64 with 8 rounds of hashing
# https://github.com/google/trax/blob/master/trax/layers/research/efficient_attention.py#L442
class LSHAttention(nn.Module):
def __init__( self,
dropout = 0.,
bucket_size = 64,
n_hashes = 8,
causal = False,
allow_duplicate_attention = True,
attend_across_buckets = True,
rehash_each_round = True,
drop_for_hash_rate = 0.0,
random_rotations_per_head = False,
return_attn = False):
super().__init__()
if dropout >= 1.0:
raise ValueError('Dropout rates must be lower than 1.')
self.dropout = nn.Dropout(dropout)
self.dropout_for_hash = nn.Dropout(drop_for_hash_rate)
assert rehash_each_round or allow_duplicate_attention, (
'The setting {allow_duplicate_attention=False, rehash_each_round=False}'
' is not implemented.')
self.causal = causal
self.bucket_size = bucket_size
self.n_hashes = n_hashes
self._allow_duplicate_attention = allow_duplicate_attention
self._attend_across_buckets = attend_across_buckets
self._rehash_each_round = rehash_each_round
self._random_rotations_per_head = random_rotations_per_head
# will expend extra computation to return attention matrix
self._return_attn = return_attn
# cache buckets for reversible network, reported by authors to make Reformer work at depth
self._cache = {}
@cache_method_decorator('_cache', 'buckets', reexecute=True)
def hash_vectors(self, n_buckets, vecs):
batch_size = vecs.shape[0]
device = vecs.device
# See https://arxiv.org/pdf/1509.02897.pdf
# We sample a different random rotation for each round of hashing to
# decrease the probability of hash misses.
assert n_buckets % 2 == 0
rot_size = n_buckets
rotations_shape = (
batch_size if self._random_rotations_per_head else 1,
vecs.shape[-1],
self.n_hashes if self._rehash_each_round else 1,
rot_size // 2)
random_rotations = torch.randn(rotations_shape, dtype=vecs.dtype, device=device).expand(batch_size, -1, -1, -1)
dropped_vecs = self.dropout_for_hash(vecs)
rotated_vecs = torch.einsum('btf,bfhi->bhti', dropped_vecs, random_rotations)
if self._rehash_each_round:
# rotated_vectors size [batch,n_hash,seq_len,buckets]
rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
buckets = torch.argmax(rotated_vecs, dim=-1)
else:
rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
# In this configuration, we map each item to the top self.n_hashes buckets
rotated_vecs = torch.squeeze(rotated_vecs, 1)
bucket_range = torch.arange(rotated_vecs.shape[-1], device=device)
bucket_range = torch.reshape(bucket_range, (1, -1))
bucket_range = bucket_range.expand_as(rotated_vecs)
_, buckets = sort_key_val(rotated_vecs, bucket_range, dim=-1)
# buckets size [batch size, seq_len, buckets]
buckets = buckets[... , -self.n_hashes:].transpose(1, 2)
        # buckets is now (batch, self.n_hashes, seq_len). Next we add offsets so that
# bucket numbers from different hashing rounds don't overlap.
offsets = torch.arange(self.n_hashes, device=device)
offsets = torch.reshape(offsets * n_buckets, (1, -1, 1))
buckets = torch.reshape(buckets + offsets, (batch_size, -1,))
return buckets
def forward(self, qk, v, query_len = None, input_mask = None, input_attn_mask = None, pos_emb = None, **kwargs):
batch_size, seqlen, dim, device = *qk.shape, qk.device
query_len = default(query_len, seqlen)
is_reverse = kwargs.pop('_reverse', False)
depth = kwargs.pop('_depth', None)
assert seqlen % (self.bucket_size * 2) == 0, f'Sequence length ({seqlen}) needs to be divisible by target bucket size x 2 - {self.bucket_size * 2}'
n_buckets = seqlen // self.bucket_size
buckets = self.hash_vectors(n_buckets, qk, key_namespace=depth, fetch=is_reverse, set_cache=self.training)
# We use the same vector as both a query and a key.
assert int(buckets.shape[1]) == self.n_hashes * seqlen
total_hashes = self.n_hashes
ticker = torch.arange(total_hashes * seqlen, device=device).unsqueeze(0).expand_as(buckets)
buckets_and_t = seqlen * buckets + (ticker % seqlen)
buckets_and_t = buckets_and_t.detach()
# Hash-based sort ("s" at the start of variable names means "sorted")
sbuckets_and_t, sticker = sort_key_val(buckets_and_t, ticker, dim=-1)
_, undo_sort = sticker.sort(dim=-1)
del ticker
sbuckets_and_t = sbuckets_and_t.detach()
sticker = sticker.detach()
undo_sort = undo_sort.detach()
if exists(pos_emb):
qk = apply_rotary_pos_emb(qk, pos_emb)
st = (sticker % seqlen)
sqk = batched_index_select(qk, st)
sv = batched_index_select(v, st)
# Split off a "bin" axis so that attention only occurs within chunks.
chunk_size = total_hashes * n_buckets
bq_t = bkv_t = torch.reshape(st, (batch_size, chunk_size, -1))
bqk = torch.reshape(sqk, (batch_size, chunk_size, -1, dim))
bv = torch.reshape(sv, (batch_size, chunk_size, -1, dim))
# Hashing operates on unit-length vectors. Unnormalized query vectors are
# fine because they effectively provide a learnable temperature for the
# attention softmax, but normalizing keys is needed so that similarity for
# the purposes of attention correctly corresponds to hash locality.
bq = bqk
bk = F.normalize(bqk, p=2, dim=-1).type_as(bq)
# Allow each chunk to attend within itself, and also one chunk back. Chunk
# boundaries might occur in the middle of a sequence of items from the
# same bucket, so this increases the chances of attending to relevant items.
def look_one_back(x):
x_extra = torch.cat([x[:, -1:, ...], x[:, :-1, ...]], dim=1)
return torch.cat([x, x_extra], dim=2)
bk = look_one_back(bk)
bv = look_one_back(bv)
bkv_t = look_one_back(bkv_t)
# Dot-product attention.
dots = torch.einsum('bhie,bhje->bhij', bq, bk) * (dim ** -0.5)
masked_value = max_neg_value(dots)
# Mask for post qk attention logits of the input sequence
if input_attn_mask is not None:
input_attn_mask = F.pad(input_attn_mask, (0, seqlen - input_attn_mask.shape[-1], 0, seqlen - input_attn_mask.shape[-2]), value=True)
dot_attn_indices = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :])
input_attn_mask = input_attn_mask.reshape(batch_size, -1)
dot_attn_indices = dot_attn_indices.reshape(batch_size, -1)
mask = input_attn_mask.gather(1, dot_attn_indices).reshape_as(dots)
dots.masked_fill_(~mask, masked_value)
del mask
# Input mask for padding in variable lengthed sequences
if input_mask is not None:
input_mask = F.pad(input_mask, (0, seqlen - input_mask.shape[1]), value=True)
mq = input_mask.gather(1, st).reshape((batch_size, chunk_size, -1))
mkv = look_one_back(mq)
mask = mq[:, :, :, None] * mkv[:, :, None, :]
dots.masked_fill_(~mask, masked_value)
del mask
# Causal masking
if self.causal:
mask = bq_t[:, :, :, None] < bkv_t[:, :, None, :]
if seqlen > query_len:
mask = mask & (bkv_t[:, :, None, :] < query_len)
dots.masked_fill_(mask, masked_value)
del mask
# Mask out attention to self except when no other targets are available.
self_mask = bq_t[:, :, :, None] == bkv_t[:, :, None, :]
dots.masked_fill_(self_mask, TOKEN_SELF_ATTN_VALUE)
del self_mask
# Mask out attention to other hash buckets.
if not self._attend_across_buckets:
bq_buckets = bkv_buckets = torch.reshape(sbuckets_and_t // seqlen, (batch_size, chunk_size, -1))
bkv_buckets = look_one_back(bkv_buckets)
bucket_mask = bq_buckets[:, :, :, None] != bkv_buckets[:, :, None, :]
dots.masked_fill_(bucket_mask, masked_value)
del bucket_mask
# Don't double-count query-key pairs across multiple rounds of hashing.
# There are two possible strategies here. (1) The default is to count how
# many times a query-key pair is repeated, and to lower its log-prob
# correspondingly at each repetition. (2) When hard_k is set, the code
# instead masks all but the first occurence of each query-key pair.
if not self._allow_duplicate_attention:
locs1 = undo_sort // bq_t.shape[-1]
locs2 = (locs1 + 1) % chunk_size
if not self._attend_across_buckets:
locs1 = buckets * chunk_size + locs1
locs2 = buckets * chunk_size + locs2
locs = torch.cat([
torch.reshape(locs1, (batch_size, total_hashes, seqlen)),
torch.reshape(locs2, (batch_size, total_hashes, seqlen)),
], 1).permute((0, 2, 1))
slocs = batched_index_select(locs, st)
b_locs = torch.reshape(slocs, (batch_size, chunk_size, -1, 2 * total_hashes))
b_locs1 = b_locs[:, :, :, None, :total_hashes]
bq_locs = b_locs1.expand(b_locs.shape[:3] + (2, total_hashes))
bq_locs = torch.reshape(bq_locs, b_locs.shape)
bkv_locs = look_one_back(b_locs)
dup_counts = (bq_locs[:, :, :, None, :] == bkv_locs[:, :, None, :, :])
# for memory considerations, chunk summation of last dimension for counting duplicates
dup_counts = chunked_sum(dup_counts, chunks=(total_hashes * batch_size))
dup_counts = dup_counts.detach()
assert dup_counts.shape == dots.shape
dots = dots - torch.log(dup_counts + 1e-9)
del dup_counts
# Softmax.
dots_logsumexp = torch.logsumexp(dots, dim=-1, keepdim=True)
dots = torch.exp(dots - dots_logsumexp).type_as(dots)
dropped_dots = self.dropout(dots)
bo = torch.einsum('buij,buje->buie', dropped_dots, bv)
so = torch.reshape(bo, (batch_size, -1, dim))
slogits = torch.reshape(dots_logsumexp, (batch_size, -1,))
# unsort logits
o = batched_index_select(so, undo_sort)
logits = slogits.gather(1, undo_sort)
o = torch.reshape(o, (batch_size, total_hashes, seqlen, dim))
logits = torch.reshape(logits, (batch_size, total_hashes, seqlen, 1))
if query_len != seqlen:
query_slice = (slice(None), slice(None), slice(0, query_len))
o, logits = o[query_slice], logits[query_slice]
probs = torch.exp(logits - torch.logsumexp(logits, dim=1, keepdim=True))
out = torch.sum(o * probs, dim=1)
attn = torch.empty(0, device=device)
# return unsorted attention weights
if self._return_attn:
attn_unsort = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :])
attn_unsort = attn_unsort.view(batch_size * total_hashes, -1).long()
unsorted_dots = torch.zeros(batch_size * total_hashes, seqlen * seqlen, device=device)
unsorted_dots.scatter_add_(1, attn_unsort, dots.view_as(attn_unsort))
del attn_unsort
unsorted_dots = unsorted_dots.reshape(batch_size, total_hashes, seqlen, seqlen)
attn = torch.sum(unsorted_dots[:, :, 0:query_len, :] * probs, dim=1)
# return output, attention matrix, and bucket distribution
return out, attn, buckets
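# usage sketch: LSHAttention operates directly on shared query/key and value tensors of shape
# (batch, seq_len, dim); seq_len must be a multiple of bucket_size * 2 (sizes below are arbitrary)
#   attn = LSHAttention(bucket_size = 64, n_hashes = 8, causal = True)
#   qk = torch.randn(2, 1024, 64)
#   v  = torch.randn(2, 1024, 64)
#   out, attn_matrix, buckets = attn(qk, v)   # attn_matrix stays empty unless return_attn = True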
# simple full attention
class FullQKAttention(nn.Module):
def __init__(self, causal = False, dropout = 0.):
super().__init__()
self.causal = causal
self.dropout = nn.Dropout(dropout)
def forward(self, qk, v, query_len = None, input_mask = None, input_attn_mask = None, **kwargs):
b, seq_len, dim = qk.shape
query_len = default(query_len, seq_len)
t = query_len
q = qk[:, 0:query_len]
qk = F.normalize(qk, 2, dim=-1).type_as(q)
dot = torch.einsum('bie,bje->bij', q, qk) * (dim ** -0.5)
# qk attention requires tokens not attend to self
i = torch.arange(t)
dot[:, i, i] = TOKEN_SELF_ATTN_VALUE
masked_value = max_neg_value(dot)
# Input mask for padding in variable lengthed sequences
if input_mask is not None:
mask = input_mask[:, 0:query_len, None] * input_mask[:, None, :]
mask = F.pad(mask, (0, seq_len - mask.shape[-1]), value=True)
dot.masked_fill_(~mask, masked_value)
# Mask for post qk attention logits of the input sequence
if input_attn_mask is not None:
input_attn_mask = F.pad(input_attn_mask, (0, seq_len - input_attn_mask.shape[-1]), value=True)
dot.masked_fill_(~input_attn_mask, masked_value)
if self.causal:
i, j = torch.triu_indices(t, t, 1)
dot[:, i, j] = masked_value
dot = dot.softmax(dim=-1)
dot = self.dropout(dot)
out = torch.einsum('bij,bje->bie', dot, v)
return out, dot, torch.empty(0)
# Shared qk attention, using either full or LSH attention
class LSHSelfAttention(nn.Module):
def __init__(self, dim, heads = 8, bucket_size = 64, n_hashes = 8, causal = False, dim_head = None, attn_chunks = 1, random_rotations_per_head = False, attend_across_buckets = True, allow_duplicate_attention = True, num_mem_kv = 0, one_value_head = False, use_full_attn = False, full_attn_thres = None, return_attn = False, post_attn_dropout = 0., dropout = 0., n_local_attn_heads = 0, **kwargs):
super().__init__()
assert dim_head or (dim % heads) == 0, 'dimensions must be divisible by number of heads'
assert n_local_attn_heads < heads, 'local attention heads must be less than number of heads'
dim_head = default(dim_head, dim // heads)
dim_heads = dim_head * heads
self.dim = dim
self.heads = heads
self.dim_head = dim_head
self.attn_chunks = default(attn_chunks, 1)
self.v_head_repeats = (heads if one_value_head else 1)
v_dim = dim_heads // self.v_head_repeats
self.toqk = nn.Linear(dim, dim_heads, bias = False)
self.tov = nn.Linear(dim, v_dim, bias = False)
self.to_out = nn.Linear(dim_heads, dim)
self.bucket_size = bucket_size
self.lsh_attn = LSHAttention(bucket_size=bucket_size, n_hashes=n_hashes, causal=causal, random_rotations_per_head=random_rotations_per_head, attend_across_buckets = attend_across_buckets, allow_duplicate_attention = allow_duplicate_attention, return_attn = return_attn, dropout = dropout, **kwargs)
self.full_attn = FullQKAttention(causal=causal, dropout=dropout)
self.post_attn_dropout = nn.Dropout(post_attn_dropout)
self.use_full_attn = use_full_attn
self.full_attn_thres = default(full_attn_thres, bucket_size)
self.num_mem_kv = num_mem_kv
self.mem_kv = nn.Parameter(torch.randn(1, num_mem_kv, dim, requires_grad=True)) if num_mem_kv > 0 else None
self.n_local_attn_heads = n_local_attn_heads
self.local_attn = LocalAttention(window_size=bucket_size * 2, causal=causal, dropout=dropout, shared_qk=True, look_forward=(1 if not causal else 0))
self.callback = None
def forward(self, x, keys = None, input_mask = None, input_attn_mask = None, context_mask = None, pos_emb = None, **kwargs):
device, dtype = x.device, x.dtype
b, t, e, h, dh, m, l_h = *x.shape, self.heads, self.dim_head, self.num_mem_kv, self.n_local_attn_heads
mem_kv = default(self.mem_kv, torch.empty(b, 0, e, dtype=dtype, device=device))
mem = mem_kv.expand(b, m, -1)
keys = default(keys, torch.empty(b, 0, e, dtype=dtype, device=device))
c = keys.shape[1]
kv_len = t + m + c
use_full_attn = self.use_full_attn or kv_len <= self.full_attn_thres
x = torch.cat((x, mem, keys), dim=1)
qk = self.toqk(x)
v = self.tov(x)
v = v.repeat(1, 1, self.v_head_repeats)
def merge_heads(v):
return v.view(b, kv_len, h, -1).transpose(1, 2)
def split_heads(v):
return v.view(b, h, t, -1).transpose(1, 2).contiguous()
merge_batch_and_heads = partial(merge_dims, 0, 1)
qk, v = map(merge_heads, (qk, v))
has_local = l_h > 0
lsh_h = h - l_h
split_index_fn = partial(split_at_index, 1, l_h)
(lqk, qk), (lv, v) = map(split_index_fn, (qk, v))
lqk, qk, lv, v = map(merge_batch_and_heads, (lqk, qk, lv, v))
masks = {}
if input_mask is not None or context_mask is not None:
default_mask = torch.tensor([True], device=device)
i_mask = default(input_mask, default_mask.expand(b, t))
m_mask = default_mask.expand(b, m)
c_mask = default(context_mask, default_mask.expand(b, c))
mask = torch.cat((i_mask, m_mask, c_mask), dim=1)
mask = merge_batch_and_heads(expand_dim(1, lsh_h, mask))
masks['input_mask'] = mask
if input_attn_mask is not None:
input_attn_mask = merge_batch_and_heads(expand_dim(1, lsh_h, input_attn_mask))
masks['input_attn_mask'] = input_attn_mask
attn_fn = self.lsh_attn if not use_full_attn else self.full_attn
partial_attn_fn = partial(attn_fn, query_len = t, pos_emb = pos_emb, **kwargs)
attn_fn_in_chunks = process_inputs_chunk(partial_attn_fn, chunks = self.attn_chunks)
out, attn, buckets = attn_fn_in_chunks(qk, v, **masks)
if self.callback is not None:
self.callback(attn.reshape(b, lsh_h, t, -1), buckets.reshape(b, lsh_h, -1))
if has_local:
lqk, lv = lqk[:, :t], lv[:, :t]
local_out = self.local_attn(lqk, lqk, lv, input_mask=input_mask)
local_out = local_out.reshape(b, l_h, t, -1)
out = out.reshape(b, lsh_h, t, -1)
out = torch.cat((local_out, out), dim=1)
out = split_heads(out).view(b, t, -1)
out = self.to_out(out)
return self.post_attn_dropout(out)
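# usage sketch: LSHSelfAttention adds the qk/v projections and multi-head plumbing around LSHAttention
#   attn = LSHSelfAttention(dim = 128, heads = 8, bucket_size = 64, n_hashes = 8, causal = False)
#   x = torch.randn(10, 1024, 128)
#   y = attn(x)   # (10, 1024, 128)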
# feed forward
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x):
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x, seq_dim = 1):
t = torch.arange(x.shape[seq_dim], device = x.device).type_as(self.inv_freq)
sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb[None, :, :].type_as(x)
# rotary positional embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
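# e.g. rotate_every_two applied to features (x1, x2, x3, x4) yields (-x2, x1, -x4, x3),
# the 90-degree rotation of each adjacent feature pair used by the rotary embedding below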
def apply_rotary_pos_emb(qk, sinu_pos):
sinu_pos = sinu_pos.type(qk.dtype)
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j = 2)
sin, cos = sinu_pos.unbind(dim = -2)
sin, cos = map(lambda t: repeat(t, 'n d -> n (d j)', j = 2), (sin, cos))
seq_len = sin.shape[0]
qk, qk_pass = qk[:, :seq_len], qk[:, seq_len:]
qk = (qk * cos) + (rotate_every_two(qk) * sin)
return torch.cat((qk, qk_pass), dim = 1)
# reformer lm
class Reformer(nn.Module):
def __init__(self, dim, depth, heads = 8, dim_head = None, bucket_size = 64, n_hashes = 8, ff_chunks = 100, attn_chunks = None, causal = False, weight_tie = False, lsh_dropout = 0., ff_dropout = 0., ff_activation = None, ff_mult = 4, ff_glu = False, post_attn_dropout = 0., layer_dropout = 0., lsh_attend_across_buckets = True, lsh_allow_duplicate_attention = True, random_rotations_per_head = False, use_scale_norm = False, use_rezero = False, use_full_attn = False, full_attn_thres = 0, reverse_thres = 0, num_mem_kv = 0, one_value_head = False, n_local_attn_heads = 0, pkm_layers = tuple(), pkm_num_keys = 128):
super().__init__()
self.dim = dim
self.depth = depth
self.bucket_size = bucket_size
self.num_mem_kv = num_mem_kv
self.full_attn_thres = full_attn_thres
get_attn = lambda: LSHSelfAttention(dim, heads, bucket_size, n_hashes, causal = causal, dim_head = dim_head, dropout = lsh_dropout, post_attn_dropout = post_attn_dropout, attn_chunks = attn_chunks, allow_duplicate_attention = lsh_allow_duplicate_attention, attend_across_buckets = lsh_attend_across_buckets, random_rotations_per_head = random_rotations_per_head, num_mem_kv = num_mem_kv, use_full_attn = use_full_attn, full_attn_thres = full_attn_thres, one_value_head = one_value_head, n_local_attn_heads = n_local_attn_heads)
get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, activation = ff_activation, mult = ff_mult, glu = ff_glu), along_dim = -2)
get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys)
if weight_tie:
get_attn, get_ff, get_pkm = map(cache_fn, (get_attn, get_ff, get_pkm))
blocks = []
norm_type = ScaleNorm if use_scale_norm else nn.LayerNorm
residual_fn_wrapper = ReZero if use_rezero else partial(PreNorm, norm_type, dim)
for ind in range(depth):
layer_num = ind + 1
use_pkm = layer_num in cast_tuple(pkm_layers)
parallel_net = None
attn = get_attn()
if use_pkm:
parallel_net = get_pkm()
else:
parallel_net = get_ff()
f = residual_fn_wrapper(attn)
g = residual_fn_wrapper(parallel_net)
blocks.append(nn.ModuleList([f, g]))
self.layers = ReversibleSequence(nn.ModuleList(blocks), layer_dropout = layer_dropout, reverse_thres = reverse_thres, send_signal = True)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
x = self.layers(x, **kwargs)
return torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
class ReformerLM(nn.Module):
def __init__(self, num_tokens, dim, depth, max_seq_len, heads = 8, dim_head = 64, bucket_size = 64, n_hashes = 4, ff_chunks = 100, attn_chunks = 1, causal = False, weight_tie = False, lsh_dropout = 0., ff_dropout = 0., ff_mult = 4, ff_activation = None, ff_glu = False, post_attn_dropout = 0., layer_dropout = 0., random_rotations_per_head = False, use_scale_norm = False, use_rezero = False, use_full_attn = False, full_attn_thres = 0, reverse_thres = 0, num_mem_kv = 0, one_value_head = False, emb_dim = None, return_embeddings = False, weight_tie_embedding = False, fixed_position_emb = False, absolute_position_emb = False, axial_position_emb = False, axial_position_shape = None, n_local_attn_heads = 0, pkm_layers = tuple(), pkm_num_keys = 128):
super().__init__()
emb_dim = default(emb_dim, dim)
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, emb_dim)
self.to_model_dim = Identity() if emb_dim == dim else nn.Linear(emb_dim, dim)
self.pos_emb = Always(0)
self.layer_pos_emb = Always(None)
if axial_position_emb:
axial_position_shape = default(axial_position_shape, (math.ceil(max_seq_len / bucket_size), bucket_size))
self.pos_emb = AxialPositionalEmbedding(emb_dim, axial_position_shape)
elif absolute_position_emb:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len)
elif fixed_position_emb:
self.pos_emb = FixedPositionalEmbedding(emb_dim)
else:
self.layer_pos_emb = FixedPositionalEmbedding(dim_head)
self.reformer = Reformer(dim, depth, heads = heads, dim_head = dim_head, bucket_size = bucket_size, n_hashes = n_hashes, ff_chunks = ff_chunks, attn_chunks = attn_chunks, causal = causal, weight_tie = weight_tie, lsh_dropout = lsh_dropout, ff_mult = ff_mult, ff_activation = ff_activation, ff_glu = ff_glu, ff_dropout = ff_dropout, post_attn_dropout = 0., layer_dropout = layer_dropout, random_rotations_per_head = random_rotations_per_head, use_scale_norm = use_scale_norm, use_rezero = use_rezero, use_full_attn = use_full_attn, full_attn_thres = full_attn_thres, reverse_thres = reverse_thres, num_mem_kv = num_mem_kv, one_value_head = one_value_head, n_local_attn_heads = n_local_attn_heads, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys)
self.norm = nn.LayerNorm(dim)
if return_embeddings:
self.out = Identity()
return
self.out = nn.Sequential(
nn.Linear(dim, emb_dim) if emb_dim != dim else Identity(),
nn.Linear(emb_dim, num_tokens) if not weight_tie_embedding else MatrixMultiply(self.token_emb.weight, transpose=True, normalize=True)
)
def forward(self, x, **kwargs):
x = self.token_emb(x)
x = x + self.pos_emb(x)
layer_pos_emb = self.layer_pos_emb(x)
x = self.to_model_dim(x)
x = self.reformer(x, pos_emb = layer_pos_emb, **kwargs)
x = self.norm(x)
return self.out(x)
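# minimal usage sketch (settings are arbitrary):
#   model = ReformerLM(
#       num_tokens = 20000,
#       dim = 512,
#       depth = 6,
#       max_seq_len = 8192,
#       heads = 8,
#       lsh_dropout = 0.1,
#       causal = True
#   )
#   x = torch.randint(0, 20000, (1, 8192))
#   y = model(x)   # (1, 8192, 20000)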
| reformer-pytorch-master | reformer_pytorch/reformer_pytorch.py |
import deepspeed
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
def add_argument():
parser=argparse.ArgumentParser(description='enwik8')
parser.add_argument('--with_cuda', default=False, action='store_true',
help='use CPU in case there\'s no GPU support')
parser.add_argument('--use_ema', default=False, action='store_true',
help='whether use exponential moving average')
parser.add_argument('-b', '--batch_size', default=32, type=int,
help='mini-batch size (default: 32)')
parser.add_argument('-e', '--epochs', default=30, type=int,
help='number of total epochs (default: 30)')
parser.add_argument('--local_rank', type=int, default=-1,
help='local rank passed from distributed launcher')
parser = deepspeed.add_config_arguments(parser)
args=parser.parse_args()
return args
# constants
EPOCHS = 20
GRADIENT_ACCUMULATE_EVERY = 4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 4096
# helpers
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = ReformerLM(
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
num_tokens = 256,
heads = 8,
bucket_size = 64,
n_hashes = 4,
ff_chunks = 10,
lsh_dropout = 0.1,
weight_tie = True,
causal = True,
n_local_attn_heads = 4,
use_full_attn = False # set this to true for comparison with full attention
)
model = TrainingWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.array(np.frombuffer(file.read(int(95e6)), dtype=np.uint8))
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# setup deepspeed
cmd_args = add_argument()
model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset)
# training
for _ in range(EPOCHS):
for i, data in enumerate(trainloader):
model_engine.train()
data = data.to(model_engine.local_rank)
loss = model_engine(data, return_loss = True)
model_engine.backward(loss)
model_engine.step()
print(loss.item() * GRADIENT_ACCUMULATE_EVERY)
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
inp = random.choice(val_dataset)[:-1]
loss = model(inp[None, :].cuda(), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
            print(f"{prime} \n\n {'*' * 100}")
sample = model.generate(inp.cuda(), GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| reformer-pytorch-master | examples/enwik8_deepspeed/train.py |
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = ReformerLM(
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
num_tokens = 256,
heads = 8,
bucket_size = 64,
n_hashes = 4,
ff_chunks = 10,
lsh_dropout = 0.1,
weight_tie = True,
causal = True,
n_local_attn_heads = 4,
use_full_attn = False # set this to true for comparison with full attention
)
model = TrainingWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.array(np.frombuffer(file.read(int(95e6)), dtype=np.uint8))
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f"{prime} \n\n {'*' * 100}")
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| reformer-pytorch-master | examples/enwik8_simple/train.py |
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from tqdm import tqdm
from reformer_pytorch import Reformer, ReformerLM
from transformers import BertTokenizer, PreTrainedTokenizer
from fairseq.optim.adafactor import Adafactor
import os
import json
import logging
from datetime import datetime
class WikiDataset(Dataset):
def __init__(self, path="", prefix="train"):
assert os.path.isdir(path)
self.documents = []
filename_list = os.listdir(path)
for file in filename_list:
path_to_file = os.path.join(path, file)
if not os.path.isfile(path_to_file):
continue
self.documents.append(path_to_file)
def __len__(self):
""" Returns the number of documents. """
return len(self.documents)
def __getitem__(self, idx):
document_path = self.documents[idx]
document_name = document_path.split("/")[-1]
items = []
with open(document_path, encoding="utf-8") as source:
raw_text = source.readlines()
for obj in raw_text:
text = json.loads(obj)['text']
text = re.sub('\\n', ' ', text)
text = re.sub('\\s+', ' ', text)
items.append(text)
return items
class ReformerTrainer(object):
def __init__(self,
dataset,
model,
tokenizer,
device=None,
train_batch_size=8,
eval_batch_size=None,
tb_writer=True,
tb_dir='./tb_logs',
log_dir='./logs'):
"""
Provides an easy to use class for pretraining and evaluating a Reformer Model.
:param dataset: (torch.utils.data.Dataset) containing all of the data you wish to utilize during training.
:param model: (reformer_pytorch.Reformer)
:param tokenizer: (transformers.PreTrainedTokenizer) defaults to BertTokenizer ('bert-base-case')
:param device: provide manual device placement. If None, will default to cuda:0 if available.
:param tb_writer: (bool) Whether to write to tensorboard or not.
:param tb_dir: (str) Where to write TB logs to.
:param log_dir: (str) Where to write generic logs to.
"""
self.dataset = dataset
self.model = model
self.tokenizer = tokenizer
self.device = device
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.tb_writer = tb_writer
self.log_dir = log_dir
if tokenizer is None:
self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
if device is None:
self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
if eval_batch_size is None:
self.eval_batch_size = train_batch_size
if tb_writer:
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter(log_dir=tb_dir)
logging.basicConfig(filename=f'{log_dir}/{datetime.now().date()}.log', level=logging.INFO)
def build_dataloaders(self, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):
"""
Builds the Training and Eval DataLoaders
:param train_test_split: The ratio split of test to train data.
:param train_shuffle: (bool) True if you wish to shuffle the train_dataset.
:param eval_shuffle: (bool) True if you wish to shuffle the eval_dataset.
:return: train dataloader and evaluation dataloader.
"""
dataset_len = len(self.dataset)
eval_len = int(dataset_len * train_test_split)
train_len = dataset_len - eval_len
train_dataset, eval_dataset = random_split(self.dataset, (train_len, eval_len))
train_loader = DataLoader(train_dataset, batch_size=self.train_batch_size, shuffle=train_shuffle)
eval_loader = DataLoader(eval_dataset, batch_size=self.eval_batch_size, shuffle=eval_shuffle)
logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}
eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')
return train_loader, eval_loader
def mask_tokens(self, inputs: torch.Tensor, mlm_probability=0.15, pad=True):
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
labels = inputs.clone()
# mlm_probability defaults to 0.15 in Bert
probability_matrix = torch.full(labels.shape, mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
if pad:
input_pads = self.tokenizer.max_len - inputs.shape[-1]
label_pads = self.tokenizer.max_len - labels.shape[-1]
inputs = F.pad(inputs, pad=(0, input_pads), value=self.tokenizer.pad_token_id)
labels = F.pad(labels, pad=(0, label_pads), value=self.tokenizer.pad_token_id)
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def _tokenize_input_ids(self, input_ids: list, pad_to_max_length: bool = True):
"""
Helper function to clean up the train and eval functions
:param input_ids: inputs to tokenize.
:param pad_to_max_length: Whether you want to pad the inputs to the tokenizer.max_len
:return: Tensor containing training data.
"""
inputs = torch.cat(
[
self.tokenizer.encode(
input_ids[i],
add_special_tokens=True,
max_length=self.tokenizer.max_len,
pad_to_max_length=pad_to_max_length,
return_tensors='pt'
) \
for i in range(len(input_ids))
]
)
return inputs
def train(self,
epochs,
train_dataloader,
eval_dataloader,
log_steps,
ckpt_steps,
ckpt_dir=None,
gradient_accumulation_steps=1):
"""
Trains the Reformer Model
:param epochs: The number of times you wish to loop through the dataset.
:param train_dataloader: (torch.utils.data.DataLoader) The data to train on.
:param eval_dataloader: (torch.utils.data.DataLoader) The data to evaluate on.
:param log_steps: The number of steps to iterate before logging.
:param ckpt_steps: The number of steps to iterate before checkpointing.
:param ckpt_dir: The directory to save the checkpoints to.
:param gradient_accumulation_steps: Optional gradient accumulation.
:return: Total number of steps, total loss, model
"""
optimizer = Adafactor(self.model.parameters())
loss_fn = nn.CrossEntropyLoss()
losses = {}
global_steps = 0
local_steps = 0
step_loss = 0.0
if ckpt_dir is not None:
assert os.path.isdir(ckpt_dir)
try:
logging.info(f'{datetime.now()} | Continuing from checkpoint...')
self.model.load_state_dict(torch.load(f'{ckpt_dir}/model_state_dict.pt', map_location=self.device))
optimizer.load_state_dict(torch.load(f'{ckpt_dir}/optimizer_state_dict.pt'))
except Exception as e:
logging.info(f'{datetime.now()} | No checkpoint was found | {e}')
self.model.train()
if self.n_gpu > 1:
self.model = nn.DataParallel(self.model)
logging.info(f'{datetime.now()} | Utilizing {self.n_gpu} GPUs')
self.model.to(self.device)
logging.info(f'{datetime.now()} | Moved model to: {self.device}')
logging.info(
f'{datetime.now()} | train_batch_size: {self.train_batch_size} | eval_batch_size: {self.eval_batch_size}')
logging.info(f'{datetime.now()} | Epochs: {epochs} | log_steps: {log_steps} | ckpt_steps: {ckpt_steps}')
logging.info(f'{datetime.now()} | gradient_accumulation_steps: {gradient_accumulation_steps}')
for epoch in tqdm(range(epochs), desc='Epochs', position=0):
logging.info(f'{datetime.now()} | Epoch: {epoch}')
for step, batch in tqdm(enumerate(train_dataloader),
desc='Epoch Iterator',
position=1,
leave=True,
total=len(train_dataloader)):
for data in batch:
inputs = self._tokenize_input_ids(data, pad_to_max_length=True)
inputs, labels = self.mask_tokens(inputs)
inputs, labels = inputs.to(self.device), labels.to(self.device)
output = self.model(inputs)
# only calculating loss on masked tokens
loss_mx = labels != -100
output = output[loss_mx].view(-1, self.tokenizer.vocab_size)
labels = labels[loss_mx].view(-1)
loss = loss_fn(output, labels)
if gradient_accumulation_steps > 1:
loss /= gradient_accumulation_steps
loss.backward()
step_loss += loss.item()
losses[global_steps] = loss.item()
local_steps += 1
global_steps += 1
if global_steps % gradient_accumulation_steps == 0:
optimizer.step()
self.model.zero_grad()
if global_steps % log_steps == 0:
if self.tb_writer:
self.writer.add_scalar('Train/Loss', step_loss / local_steps, global_steps)
self.writer.close()
logging.info(
f'''{datetime.now()} | Train Loss: {step_loss / local_steps} | Steps: {global_steps}''')
with open(f'{self.log_dir}/train_results.json', 'w') as results_file:
json.dump(losses, results_file)
results_file.close()
step_loss = 0.0
local_steps = 0
if global_steps % ckpt_steps == 0:
# evaluating before every checkpoint
self.evaluate(eval_dataloader)
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
torch.save(model_to_save.state_dict(), f'{ckpt_dir}/model_state_dict.pt')
torch.save(optimizer.state_dict(), f'{ckpt_dir}/optimizer_state_dict.pt')
logging.info(f'{datetime.now()} | Saved checkpoint to: {ckpt_dir}')
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
torch.save(model_to_save.state_dict(), f'{ckpt_dir}/model_state_dict.pt')
torch.save(optimizer.state_dict(), f'{ckpt_dir}/optimizer_state_dict.pt')
return self.model
def evaluate(self, dataloader):
"""
Runs through the provided dataloader with torch.no_grad()
:param dataloader: (torch.utils.data.DataLoader) Evaluation DataLoader
:return: None
"""
loss_fn = nn.CrossEntropyLoss()
if self.n_gpu > 1 and not isinstance(self.model, nn.DataParallel):
self.model = nn.DataParallel(self.model)
self.model.eval()
eval_loss = 0.0
perplexity = 0.0
eval_steps = 0
logging.info(f'{datetime.now()} | Evaluating...')
for step, batch in tqdm(enumerate(dataloader), desc='Evaluating', leave=True, total=len(dataloader)):
for data in batch:
inputs = self._tokenize_input_ids(data, pad_to_max_length=True)
inputs, labels = self.mask_tokens(inputs)
inputs, labels = inputs.to(self.device), labels.to(self.device)
with torch.no_grad():
output = self.model(inputs)
loss_mx = labels != -100
output_ids = output[loss_mx].view(-1, self.tokenizer.vocab_size)
labels = labels[loss_mx].view(-1)
tmp_eval_loss = loss_fn(output_ids, labels)
tmp_perplexity = torch.exp(tmp_eval_loss)
if self.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
perplexity += tmp_perplexity.item()
eval_steps += 1
eval_loss /= eval_steps
perplexity /= eval_steps
if self.tb_writer:
self.writer.add_scalar('Eval/Loss', eval_loss, eval_steps)
self.writer.close()
self.writer.add_scalar('Perplexity', perplexity, eval_steps)
self.writer.close()
logging.info(f'{datetime.now()} | Step: {step} | Eval Loss: {eval_loss} | Perplexity: {perplexity}')
return None
if __name__ == '__main__':
dataset = WikiDataset(path='D:/data/enwiki')
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
tokenizer.max_len = 128
model = ReformerLM(
num_tokens=tokenizer.vocab_size,
dim=512,
depth=6,
heads=8,
max_seq_len=tokenizer.max_len,
causal=True
)
trainer = ReformerTrainer(dataset, model, tokenizer, train_batch_size=32, eval_batch_size=32)
train_dataloader, eval_dataloader = trainer.build_dataloaders(train_test_split=0.90)
model = trainer.train(epochs=3,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
log_steps=10,
ckpt_steps=100,
ckpt_dir='./ckpts',
gradient_accumulation_steps=1)
torch.save(model, './ckpts/model.bin')
| reformer-pytorch-master | pretraining/self-supervised.py |
from setuptools import setup, find_packages
setup(
name = 'tranception-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.8',
license='MIT',
description = 'Tranception - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/tranception-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'protein fitness'
],
install_requires=[
'einops>=0.4',
'einops-exts',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| tranception-pytorch-main | setup.py |
from tranception_pytorch.tranception_pytorch import Tranception
| tranception-pytorch-main | tranception_pytorch/__init__.py |
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from einops_exts import rearrange_many
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# relative positional bias
class LearnedAlibiPosBias(nn.Module):
def __init__(self, heads):
super().__init__()
self.heads = heads
slopes = torch.Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.slopes = nn.Parameter(slopes)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(i, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
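    # note: for e.g. 8 heads, the geometric sequence above yields slopes 1/2, 1/4, ..., 1/256,
    # matching the ALiBi formulation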
def forward(self, qk_sim):
h, i, j, device = *qk_sim.shape[-3:], qk_sim.device
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied))
self.register_buffer('bias', bias, persistent = False)
return bias
# helper classes
class ReluSquared(nn.Module):
""" found with neural architecture search in Primer paper """
def forward(self, x):
return F.relu(x) ** 2
def FeedForward(dim, mult = 4):
hidden_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, hidden_dim),
ReluSquared(),
nn.Linear(hidden_dim, dim)
)
class DepthwiseConv1d(nn.Module):
def __init__(self, dim, kernel_size, causal = True):
super().__init__()
assert (kernel_size % 2) == 1
self.padding = (kernel_size - 1, 0) if causal else (kernel_size // 2, kernel_size // 2)
self.conv = nn.Conv1d(dim, dim, kernel_size = kernel_size, groups = dim)
def forward(self, x):
x = F.pad(x, self.padding)
return self.conv(x)
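# note: with causal = True, DepthwiseConv1d left-pads by (kernel_size - 1), so each position
# only convolves over itself and past positions while the sequence length is preserved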
class Attention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
causal = False,
ds_conv_kernel_sizes = (0, 3, 5, 7) # heads were grouped into 4 groups and given a depthwise conv after the queries / keys / values projection
):
super().__init__()
self.groups = len(ds_conv_kernel_sizes)
        assert heads >= self.groups and (heads % self.groups) == 0, f'heads must be at least {self.groups} and divisible by {self.groups}'
self.scale = dim_head ** -0.5
self.causal = causal
self.heads = heads
self.heads_per_group = heads // self.groups
inner_dim = heads * dim_head
self.norm = nn.LayerNorm(dim)
self.to_qkv = nn.Conv1d(dim, inner_dim * 3, 1, bias = False)
# ds convs with different kernel sizes for 4 groups of heads
self.qkv_ds_convs = nn.ModuleList([])
for _ in range(3): # for queries, keys, values
ds_convs = nn.ModuleList([])
for kernel_size in ds_conv_kernel_sizes:
if kernel_size == 0:
ds_convs.append(nn.Identity())
continue
ds_convs.append(DepthwiseConv1d(dim_head * self.heads_per_group, kernel_size, causal = causal))
self.qkv_ds_convs.append(ds_convs)
# learned alibi positional bias for 4 groups of heads
self.learned_alibi_pos_biases = nn.ModuleList([LearnedAlibiPosBias(heads = self.heads_per_group) for _ in range(self.groups)])
# outward projection
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x):
device, heads_per_group = x.device, self.heads_per_group
x = self.norm(x)
x = rearrange(x, 'b n d -> b d n')
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = rearrange_many((q, k, v), 'b (h d) n -> b h d n', h = self.heads)
# apply causal depthwise conv to queries, keys, values (a la Primer) with different kernel sizes across 4 groups of heads
def apply_causal_ds_conv_to_grouped_heads(args):
projs, ds_convs = args
batch = projs.shape[0]
projs = rearrange_many(projs.split(heads_per_group, dim = 1), 'b h d n -> b (h d) n')
conv_out = [fn(t) for fn, t in zip(ds_convs, projs)]
conv_out = map(lambda t: rearrange(t, 'b (h d) n -> b h d n', h = heads_per_group), conv_out)
conv_out = torch.cat(tuple(conv_out), dim = 1)
return rearrange(conv_out, 'b h d n -> b h n d')
q, k, v = map(apply_causal_ds_conv_to_grouped_heads, zip((q, k, v), self.qkv_ds_convs))
# scale and similarity
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# learned alibi pos bias across 4 groups of heads
# so heads specialize to looking at different distances of kmers
grouped_sims = sim.split(self.heads // self.groups, dim = 1)
grouped_sims = [(alibi(sim_group) + sim_group) for alibi, sim_group in zip(self.learned_alibi_pos_biases, grouped_sims)]
sim = torch.cat(grouped_sims, dim = 1)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention, but of course
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# classes
class Tranception(nn.Module):
def __init__(
self,
*,
dim,
depth,
num_tokens = 21,
heads = 8,
dim_head = 64,
ff_mult = 4,
ds_conv_kernel_sizes = (0, 3, 5, 7),
causal = True
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dim_head = dim_head, ds_conv_kernel_sizes = ds_conv_kernel_sizes, causal = causal),
FeedForward(dim, mult = ff_mult)
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(
self,
x,
mask = None
):
x = self.token_emb(x)
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.to_logits(x)
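# minimal usage sketch (not part of the original file) - the small config below is assumed
# purely to illustrate the expected shapes; the defaults above use a 21-token vocabulary
if __name__ == '__main__':
    model = Tranception(dim = 128, depth = 2)
    seq = torch.randint(0, 21, (1, 64))    # (batch, length) token ids
    logits = model(seq)                    # (1, 64, 21) per-position logits
    print(logits.shape)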
| tranception-pytorch-main | tranception_pytorch/tranception_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'g-mlp-pytorch',
packages = find_packages(),
version = '0.1.5',
license='MIT',
description = 'gMLP - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/g-mlp-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'multi-layered-preceptrons'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| g-mlp-pytorch-main | setup.py |
from g_mlp_pytorch import gMLP
from g_mlp_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 768
SEQ_LEN = 768
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = gMLP(
num_tokens = 256,
dim = 512,
seq_len = SEQ_LEN,
depth = 8,
causal = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| g-mlp-pytorch-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
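# note: thres = 0.9 keeps only the top 10% of logits (k = 10% of the vocabulary);
# everything else is filled with -inf before sampling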
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi, xo = x[:, :-1], x[:, 1:]
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
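# minimal usage sketch (not part of the original file) - assumes the g_mlp_pytorch package is
# importable; any net exposing a `seq_len` attribute and returning (batch, seq, num_tokens)
# logits would work the same way
if __name__ == '__main__':
    from g_mlp_pytorch import gMLP
    net = gMLP(num_tokens = 256, dim = 128, depth = 2, seq_len = 64, causal = True)
    model = AutoregressiveWrapper(net)
    loss = model(torch.randint(0, 256, (1, 64)))                 # teacher-forced LM loss
    sampled = model.generate(torch.randint(0, 256, (8,)), 16)    # (16,) sampled token ids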
| g-mlp-pytorch-main | g_mlp_pytorch/autoregressive_wrapper.py |
from g_mlp_pytorch.g_mlp_pytorch import gMLP, gMLPVision, gMLPBlock, SpatialGatingUnit
| g-mlp-pytorch-main | g_mlp_pytorch/__init__.py |
from random import randrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# functions
def exists(val):
return val is not None
def pair(val):
return (val, val) if not isinstance(val, tuple) else val
def dropout_layers(layers, prob_survival):
if prob_survival == 1:
return layers
num_layers = len(layers)
to_drop = torch.zeros(num_layers).uniform_(0., 1.) > prob_survival
# make sure at least one layer makes it
if all(to_drop):
rand_index = randrange(num_layers)
to_drop[rand_index] = False
layers = [layer for (layer, drop) in zip(layers, to_drop) if not drop]
return layers
def shift(t, amount, mask = None):
if amount == 0:
return t
return F.pad(t, (0, 0, amount, -amount), value = 0.)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class PreShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
if self.shifts == (0,):
return self.fn(x, **kwargs)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Attention(nn.Module):
def __init__(self, dim_in, dim_out, dim_inner, causal = False):
super().__init__()
self.scale = dim_inner ** -0.5
self.causal = causal
self.to_qkv = nn.Linear(dim_in, dim_inner * 3, bias = False)
self.to_out = nn.Linear(dim_inner, dim_out)
def forward(self, x):
device = x.device
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if self.causal:
mask = torch.ones(sim.shape[-2:], device = device).triu(1).bool()
sim.masked_fill_(mask[None, ...], -torch.finfo(q.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
return self.to_out(out)
class SpatialGatingUnit(nn.Module):
def __init__(
self,
dim,
dim_seq,
causal = False,
act = nn.Identity(),
heads = 1,
init_eps = 1e-3,
circulant_matrix = False
):
super().__init__()
dim_out = dim // 2
self.heads = heads
self.causal = causal
self.norm = nn.LayerNorm(dim_out)
self.act = act
# parameters
if circulant_matrix:
self.circulant_pos_x = nn.Parameter(torch.ones(heads, dim_seq))
self.circulant_pos_y = nn.Parameter(torch.ones(heads, dim_seq))
self.circulant_matrix = circulant_matrix
shape = (heads, dim_seq,) if circulant_matrix else (heads, dim_seq, dim_seq)
weight = torch.zeros(shape)
self.weight = nn.Parameter(weight)
init_eps /= dim_seq
nn.init.uniform_(self.weight, -init_eps, init_eps)
self.bias = nn.Parameter(torch.ones(heads, dim_seq))
def forward(self, x, gate_res = None):
device, n, h = x.device, x.shape[1], self.heads
res, gate = x.chunk(2, dim = -1)
gate = self.norm(gate)
weight, bias = self.weight, self.bias
if self.circulant_matrix:
# build the circulant matrix
dim_seq = weight.shape[-1]
weight = F.pad(weight, (0, dim_seq), value = 0)
weight = repeat(weight, '... n -> ... (r n)', r = dim_seq)
weight = weight[:, :-dim_seq].reshape(h, dim_seq, 2 * dim_seq - 1)
weight = weight[:, :, (dim_seq - 1):]
# give circulant matrix absolute position awareness
pos_x, pos_y = self.circulant_pos_x, self.circulant_pos_y
weight = weight * rearrange(pos_x, 'h i -> h i ()') * rearrange(pos_y, 'h j -> h () j')
if self.causal:
weight, bias = weight[:, :n, :n], bias[:, :n]
mask = torch.ones(weight.shape[-2:], device = device).triu_(1).bool()
mask = rearrange(mask, 'i j -> () i j')
weight = weight.masked_fill(mask, 0.)
gate = rearrange(gate, 'b n (h d) -> b h n d', h = h)
gate = einsum('b h n d, h m n -> b h m d', gate, weight)
gate = gate + rearrange(bias, 'h n -> () h n ()')
gate = rearrange(gate, 'b h n d -> b n (h d)')
if exists(gate_res):
gate = gate + gate_res
return self.act(gate) * res
class gMLPBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_ff,
seq_len,
heads = 1,
attn_dim = None,
causal = False,
act = nn.Identity(),
circulant_matrix = False
):
super().__init__()
self.proj_in = nn.Sequential(
nn.Linear(dim, dim_ff),
nn.GELU()
)
self.attn = Attention(dim, dim_ff // 2, attn_dim, causal) if exists(attn_dim) else None
self.sgu = SpatialGatingUnit(dim_ff, seq_len, causal, act, heads, circulant_matrix = circulant_matrix)
self.proj_out = nn.Linear(dim_ff // 2, dim)
def forward(self, x):
gate_res = self.attn(x) if exists(self.attn) else None
x = self.proj_in(x)
x = self.sgu(x, gate_res = gate_res)
x = self.proj_out(x)
return x
# main classes
class gMLP(nn.Module):
def __init__(
self,
*,
num_tokens = None,
dim,
depth,
seq_len,
heads = 1,
ff_mult = 4,
attn_dim = None,
prob_survival = 1.,
causal = False,
circulant_matrix = False,
shift_tokens = 0,
act = nn.Identity()
):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by number of heads'
dim_ff = dim * ff_mult
self.seq_len = seq_len
self.prob_survival = prob_survival
self.to_embed = nn.Embedding(num_tokens, dim) if exists(num_tokens) else nn.Identity()
token_shifts = tuple(range(0 if causal else -shift_tokens, shift_tokens + 1))
self.layers = nn.ModuleList([Residual(PreNorm(dim, PreShiftTokens(token_shifts, gMLPBlock(dim = dim, heads = heads, dim_ff = dim_ff, seq_len = seq_len, attn_dim = attn_dim, causal = causal, act = act, circulant_matrix = circulant_matrix)))) for i in range(depth)])
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
) if exists(num_tokens) else nn.Identity()
def forward(self, x):
x = self.to_embed(x)
layers = self.layers if not self.training else dropout_layers(self.layers, self.prob_survival)
out = nn.Sequential(*layers)(x)
return self.to_logits(out)
class gMLPVision(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
num_classes,
dim,
depth,
heads = 1,
ff_mult = 4,
channels = 3,
attn_dim = None,
prob_survival = 1.
):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by number of heads'
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert (image_height % patch_height) == 0 and (image_width % patch_width) == 0, 'image height and width must be divisible by patch size'
num_patches = (image_height // patch_height) * (image_width // patch_width)
dim_ff = dim * ff_mult
self.to_patch_embed = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (c p1 p2)', p1 = patch_height, p2 = patch_width),
nn.Linear(channels * patch_height * patch_width, dim)
)
self.prob_survival = prob_survival
self.layers = nn.ModuleList([Residual(PreNorm(dim, gMLPBlock(dim = dim, heads = heads, dim_ff = dim_ff, seq_len = num_patches, attn_dim = attn_dim))) for i in range(depth)])
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
Reduce('b n d -> b d', 'mean'),
nn.Linear(dim, num_classes)
)
def forward(self, x):
x = self.to_patch_embed(x)
layers = self.layers if not self.training else dropout_layers(self.layers, self.prob_survival)
x = nn.Sequential(*layers)(x)
return self.to_logits(x)
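# minimal usage sketch (not part of the original file) - tiny configs, shapes only
if __name__ == '__main__':
    lm = gMLP(num_tokens = 256, dim = 128, depth = 2, seq_len = 64, causal = True)
    logits = lm(torch.randint(0, 256, (1, 64)))                          # (1, 64, 256)
    vit = gMLPVision(image_size = 64, patch_size = 16, num_classes = 10, dim = 128, depth = 2)
    preds = vit(torch.randn(1, 3, 64, 64))                               # (1, 10)
    print(logits.shape, preds.shape)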
| g-mlp-pytorch-main | g_mlp_pytorch/g_mlp_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'charformer-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Charformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/charformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'learned tokenization'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| charformer-pytorch-main | setup.py |
from charformer_pytorch.charformer_pytorch import GBST
| charformer-pytorch-main | charformer_pytorch/__init__.py |
import math
from math import gcd
import functools
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def lcm(*numbers):
return int(functools.reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1))
def masked_mean(tensor, mask, dim = -1):
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor.masked_fill_(~mask, 0.)
total_el = mask.sum(dim = dim)
mean = tensor.sum(dim = dim) / total_el.clamp(min = 1.)
mean.masked_fill_(total_el == 0, 0.)
return mean
def next_divisible_length(seqlen, multiple):
return math.ceil(seqlen / multiple) * multiple
def pad_to_multiple(tensor, multiple, *, seq_dim, dim = -1, value = 0.):
seqlen = tensor.shape[seq_dim]
length = next_divisible_length(seqlen, multiple)
if length == seqlen:
return tensor
remainder = length - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value = value)
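# e.g. next_divisible_length(10, 4) == 12, so pad_to_multiple pads a length-10 sequence
# with 2 extra positions on the right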
# helper classes
class Pad(nn.Module):
def __init__(self, padding, value = 0.):
super().__init__()
self.padding = padding
self.value = value
def forward(self, x):
return F.pad(x, self.padding, value = self.value)
class DepthwiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size):
super().__init__()
self.conv = nn.Conv1d(dim_in, dim_out, kernel_size, groups = dim_in)
self.proj_out = nn.Conv1d(dim_out, dim_out, 1)
def forward(self, x):
x = self.conv(x)
return self.proj_out(x)
# main class
class GBST(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_block_size = None,
blocks = None,
downsample_factor = 4,
score_consensus_attn = True
):
super().__init__()
assert exists(max_block_size) ^ exists(blocks), 'either max_block_size or blocks are given on initialization'
self.token_emb = nn.Embedding(num_tokens, dim)
if exists(blocks):
assert isinstance(blocks, tuple), 'blocks must be a tuple of block sizes'
self.blocks = tuple(map(lambda el: el if isinstance(el, tuple) else (el, 0), blocks))
assert all([(offset < block_size) for block_size, offset in self.blocks]), 'offset must be always smaller than the block size'
max_block_size = max(list(map(lambda t: t[0], self.blocks)))
else:
self.blocks = tuple(map(lambda el: (el, 0), range(1, max_block_size + 1)))
self.pos_conv = nn.Sequential(
Pad((0, 0, 0, max_block_size - 1)),
Rearrange('b n d -> b d n'),
DepthwiseConv1d(dim, dim, kernel_size = max_block_size),
Rearrange('b d n -> b n d')
)
self.score_fn = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... () -> ...')
)
self.score_consensus_attn = score_consensus_attn
assert downsample_factor <= max_block_size, 'final downsample factor should be less than the maximum block size'
self.block_pad_multiple = lcm(*[block_size for block_size, _ in self.blocks])
self.downsample_factor = downsample_factor
def forward(self, x, mask = None):
b, n, block_mult, ds_factor, device = *x.shape, self.block_pad_multiple, self.downsample_factor, x.device
m = next_divisible_length(n, ds_factor)
# get character token embeddings
x = self.token_emb(x)
# do a conv to generate the positions for the tokens
x = self.pos_conv(x)
        # pad both sequence and mask to a length divisible by all block sizes (up to the max block size)
x = pad_to_multiple(x, block_mult, seq_dim = 1, dim = -2)
if exists(mask):
mask = pad_to_multiple(mask, block_mult, seq_dim = 1, dim = -1, value = False)
# compute representations for all blocks by mean pooling
block_masks = []
block_reprs = []
for block_size, offset in self.blocks:
# clone the input sequence as well as the mask, in order to pad for offsets
block_x = x.clone()
if exists(mask):
block_mask = mask.clone()
# pad for offsets, if needed
need_padding = offset > 0
if need_padding:
left_offset, right_offset = (block_size - offset), offset
block_x = F.pad(block_x, (0, 0, left_offset, right_offset), value = 0.)
if exists(mask):
block_mask = F.pad(block_mask, (left_offset, right_offset), value = False)
# group input sequence into blocks
blocks = rearrange(block_x, 'b (n m) d -> b n m d', m = block_size)
# either mean pool the blocks, or do a masked mean
if exists(mask):
mask_blocks = rearrange(block_mask, 'b (n m) -> b n m', m = block_size)
block_repr = masked_mean(blocks, mask_blocks, dim = -2)
else:
block_repr = blocks.mean(dim = -2)
# append the block representations, as well as the pooled block masks
block_repr = repeat(block_repr, 'b n d -> b (n m) d', m = block_size)
if need_padding:
block_repr = block_repr[:, left_offset:-right_offset]
block_reprs.append(block_repr)
if exists(mask):
mask_blocks = torch.any(mask_blocks, dim = -1)
mask_blocks = repeat(mask_blocks, 'b n -> b (n m)', m = block_size)
if need_padding:
mask_blocks = mask_blocks[:, left_offset:-right_offset]
block_masks.append(mask_blocks)
# stack all the block representations
block_reprs = torch.stack(block_reprs, dim = 2)
# calculate scores and softmax across the block size dimension
scores = self.score_fn(block_reprs)
if exists(mask):
block_masks = torch.stack(block_masks, dim = 2)
max_neg_value = -torch.finfo(scores.dtype).max
scores = scores.masked_fill(~block_masks, max_neg_value)
scores = scores.softmax(dim = 2)
# do the cheap consensus attention, eq (5) in paper
if self.score_consensus_attn:
score_sim = einsum('b i d, b j d -> b i j', scores, scores)
if exists(mask):
cross_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
max_neg_value = -torch.finfo(score_sim.dtype).max
score_sim = score_sim.masked_fill(~cross_mask, max_neg_value)
score_attn = score_sim.softmax(dim = -1)
scores = einsum('b i j, b j m -> b i m', score_attn, scores)
# multiply the block representations by the position-wise scores
scores = rearrange(scores, 'b n m -> b n m ()')
x = (block_reprs * scores).sum(dim = 2)
# truncate to length divisible by downsample factor
x = x[:, :m]
if exists(mask):
mask = mask[:, :m]
# final mean pooling downsample
x = rearrange(x, 'b (n m) d -> b n m d', m = ds_factor)
if exists(mask):
mask = rearrange(mask, 'b (n m) -> b n m', m = ds_factor)
x = masked_mean(x, mask, dim = 2)
mask = torch.any(mask, dim = -1)
else:
x = x.mean(dim = -2)
return x, mask
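# minimal usage sketch (not part of the original file) - the 257-token byte vocabulary below
# is only an assumption for illustration
if __name__ == '__main__':
    tokenizer = GBST(num_tokens = 257, dim = 512, max_block_size = 4, downsample_factor = 4)
    ids = torch.randint(0, 257, (1, 1024))
    mask = torch.ones(1, 1024).bool()
    x, mask = tokenizer(ids, mask = mask)    # (1, 256, 512), (1, 256)
    print(x.shape, mask.shape)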
| charformer-pytorch-main | charformer_pytorch/charformer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'retrieval-augmented-ddpm',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Retrieval-Augmented Denoising Diffusion Probabilistic Models',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/retrieval-augmented-ddpm',
keywords = [
'artificial intelligence',
'deep learning',
'denoising diffusion',
'retrieval'
],
install_requires=[
'clip-retrieval',
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| retrieval-augmented-ddpm-main | setup.py |
| retrieval-augmented-ddpm-main | retrieval_augmented_ddpm/retrieval_augmented_ddpm.py |
| retrieval-augmented-ddpm-main | retrieval_augmented_ddpm/__init__.py |
import argparse
from pathlib import Path
from tqdm import tqdm
# torch
import torch
from einops import repeat
# vision imports
from PIL import Image
from torchvision.utils import make_grid, save_image
# dalle related classes and utils
from dalle_pytorch import __version__
from dalle_pytorch import DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE, DALLE
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, YttmTokenizer, ChineseTokenizer
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--dalle_path', type = str, required = True,
help='path to your trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--text', type = str, required = True,
help='your text prompt')
parser.add_argument('--num_images', type = int, default = 128, required = False,
help='number of images')
parser.add_argument('--batch_size', type = int, default = 4, required = False,
help='batch size')
parser.add_argument('--top_k', type = float, default = 0.9, required = False,
help='top k filter threshold')
parser.add_argument('--outputs_dir', type = str, default = './outputs', required = False,
help='output directory')
parser.add_argument('--bpe_path', type = str,
help='path to your huggingface BPE json file')
parser.add_argument('--hug', dest='hug', action = 'store_true')
parser.add_argument('--chinese', dest='chinese', action = 'store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--gentxt', dest='gentxt', action='store_true')
args = parser.parse_args()
# helper fns
def exists(val):
return val is not None
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# load DALL-E
dalle_path = Path(args.dalle_path)
assert dalle_path.exists(), 'trained DALL-E must exist'
load_obj = torch.load(str(dalle_path))
dalle_params, vae_params, weights, vae_class_name, version = load_obj.pop('hparams'), load_obj.pop('vae_params'), load_obj.pop('weights'), load_obj.pop('vae_class_name', None), load_obj.pop('version', None)
# friendly print
if exists(version):
print(f'Loading a model trained with DALLE-pytorch version {version}')
else:
print('You are loading a model trained on an older version of DALL-E pytorch - it may not be compatible with the most recent version')
# load VAE
if args.taming:
vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)
elif vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
vae = OpenAIDiscreteVAE()
assert not (exists(vae_class_name) and vae.__class__.__name__ != vae_class_name), f'you trained DALL-E using {vae_class_name} but are trying to generate with {vae.__class__.__name__} - please make sure you are passing in the correct paths and settings for the VAE to use for generation'
# reconstitute DALL-E
dalle = DALLE(vae = vae, **dalle_params).cuda()
dalle.load_state_dict(weights)
# generate images
image_size = vae.image_size
texts = args.text.split('|')
for j, text in tqdm(enumerate(texts)):
if args.gentxt:
text_tokens, gen_texts = dalle.generate_texts(tokenizer, text=text, filter_thres = args.top_k)
text = gen_texts[0]
else:
text_tokens = tokenizer.tokenize([text], dalle.text_seq_len).cuda()
text_tokens = repeat(text_tokens, '() n -> b n', b = args.num_images)
outputs = []
for text_chunk in tqdm(text_tokens.split(args.batch_size), desc = f'generating images for - {text}'):
output = dalle.generate_images(text_chunk, filter_thres = args.top_k)
outputs.append(output)
outputs = torch.cat(outputs)
# save all images
file_name = text
outputs_dir = Path(args.outputs_dir) / file_name.replace(' ', '_')[:(100)]
outputs_dir.mkdir(parents = True, exist_ok = True)
for i, image in tqdm(enumerate(outputs), desc = 'saving images'):
save_image(image, outputs_dir / f'{i}.png', normalize=True)
with open(outputs_dir / 'caption.txt', 'w') as f:
f.write(file_name)
print(f'created {args.num_images} images at "{str(outputs_dir)}"')
| DALLE-pytorch-main | generate.py |
import math
from math import sqrt
import argparse
from pathlib import Path
# torch
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# vision imports
from torchvision import transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle classes and utils
from dalle_pytorch import distributed_utils
from dalle_pytorch import DiscreteVAE
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--image_folder', type = str, required = True,
help='path to your folder of images for learning the discrete VAE and its codebook')
parser.add_argument('--image_size', type = int, required = False, default = 128,
help='image size')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--epochs', type = int, default = 20, help = 'number of epochs')
train_group.add_argument('--batch_size', type = int, default = 8, help = 'batch size')
train_group.add_argument('--learning_rate', type = float, default = 1e-3, help = 'learning rate')
train_group.add_argument('--lr_decay_rate', type = float, default = 0.98, help = 'learning rate decay')
train_group.add_argument('--starting_temp', type = float, default = 1., help = 'starting temperature')
train_group.add_argument('--temp_min', type = float, default = 0.5, help = 'minimum temperature to anneal to')
train_group.add_argument('--anneal_rate', type = float, default = 1e-6, help = 'temperature annealing rate')
train_group.add_argument('--num_images_save', type = int, default = 4, help = 'number of images to save')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--num_tokens', type = int, default = 8192, help = 'number of image tokens')
model_group.add_argument('--num_layers', type = int, default = 3, help = 'number of layers (should be 3 or above)')
model_group.add_argument('--num_resnet_blocks', type = int, default = 2, help = 'number of residual net blocks')
model_group.add_argument('--smooth_l1_loss', dest = 'smooth_l1_loss', action = 'store_true')
model_group.add_argument('--emb_dim', type = int, default = 512, help = 'embedding dimension')
model_group.add_argument('--hidden_dim', type = int, default = 256, help = 'hidden dimension')
model_group.add_argument('--kl_loss_weight', type = float, default = 0., help = 'KL loss weight')
model_group.add_argument('--transparent', dest = 'transparent', action = 'store_true')
args = parser.parse_args()
# constants
IMAGE_SIZE = args.image_size
IMAGE_PATH = args.image_folder
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
LR_DECAY_RATE = args.lr_decay_rate
NUM_TOKENS = args.num_tokens
NUM_LAYERS = args.num_layers
NUM_RESNET_BLOCKS = args.num_resnet_blocks
SMOOTH_L1_LOSS = args.smooth_l1_loss
EMB_DIM = args.emb_dim
HIDDEN_DIM = args.hidden_dim
KL_LOSS_WEIGHT = args.kl_loss_weight
TRANSPARENT = args.transparent
CHANNELS = 4 if TRANSPARENT else 3
IMAGE_MODE = 'RGBA' if TRANSPARENT else 'RGB'
STARTING_TEMP = args.starting_temp
TEMP_MIN = args.temp_min
ANNEAL_RATE = args.anneal_rate
NUM_IMAGES_SAVE = args.num_images_save
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# data
ds = ImageFolder(
IMAGE_PATH,
T.Compose([
T.Lambda(lambda img: img.convert(IMAGE_MODE) if img.mode != IMAGE_MODE else img),
T.Resize(IMAGE_SIZE),
T.CenterCrop(IMAGE_SIZE),
T.ToTensor()
])
)
if distributed_utils.using_backend(distributed_utils.HorovodBackend):
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds, num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank())
else:
data_sampler = None
dl = DataLoader(ds, BATCH_SIZE, shuffle = not data_sampler, sampler=data_sampler)
vae_params = dict(
image_size = IMAGE_SIZE,
num_layers = NUM_LAYERS,
num_tokens = NUM_TOKENS,
channels = CHANNELS,
codebook_dim = EMB_DIM,
hidden_dim = HIDDEN_DIM,
num_resnet_blocks = NUM_RESNET_BLOCKS
)
vae = DiscreteVAE(
**vae_params,
smooth_l1_loss = SMOOTH_L1_LOSS,
kl_div_loss_weight = KL_LOSS_WEIGHT
)
if not using_deepspeed:
vae = vae.cuda()
assert len(ds) > 0, 'folder does not contain any images'
if distr_backend.is_root_worker():
print(f'{len(ds)} images found for training')
# optimizer
opt = Adam(vae.parameters(), lr = LEARNING_RATE)
sched = ExponentialLR(optimizer = opt, gamma = LR_DECAY_RATE)
if distr_backend.is_root_worker():
# weights & biases experiment tracking
import wandb
model_config = dict(
num_tokens = NUM_TOKENS,
smooth_l1_loss = SMOOTH_L1_LOSS,
num_resnet_blocks = NUM_RESNET_BLOCKS,
kl_loss_weight = KL_LOSS_WEIGHT
)
run = wandb.init(
project = 'dalle_train_vae',
job_type = 'train_model',
config = model_config
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {'train_batch_size': BATCH_SIZE}
(distr_vae, distr_opt, distr_dl, distr_sched) = distr_backend.distribute(
args=args,
model=vae,
optimizer=opt,
model_parameters=vae.parameters(),
training_data=ds if using_deepspeed else dl,
lr_scheduler=sched if not using_deepspeed else None,
config_params=deepspeed_config,
)
using_deepspeed_sched = False
# Prefer scheduler in `deepspeed_config`.
if distr_sched is None:
distr_sched = sched
elif using_deepspeed:
# We are using a DeepSpeed LR scheduler and want to let DeepSpeed
# handle its scheduling.
using_deepspeed_sched = True
def save_model(path):
save_obj = {
'hparams': vae_params,
}
if using_deepspeed:
cp_path = Path(path)
path_sans_extension = cp_path.parent / cp_path.stem
cp_dir = str(path_sans_extension) + '-ds-cp'
distr_vae.save_checkpoint(cp_dir, client_state=save_obj)
# We do not return so we do get a "normal" checkpoint to refer to.
if not distr_backend.is_root_worker():
return
save_obj = {
**save_obj,
'weights': vae.state_dict()
}
torch.save(save_obj, path)
# starting temperature
global_step = 0
temp = STARTING_TEMP
for epoch in range(EPOCHS):
for i, (images, _) in enumerate(distr_dl):
images = images.cuda()
loss, recons = distr_vae(
images,
return_loss = True,
return_recons = True,
temp = temp
)
if using_deepspeed:
# Gradients are automatically zeroed after the step
distr_vae.backward(loss)
distr_vae.step()
else:
distr_opt.zero_grad()
loss.backward()
distr_opt.step()
logs = {}
if i % 100 == 0:
if distr_backend.is_root_worker():
k = NUM_IMAGES_SAVE
with torch.no_grad():
codes = vae.get_codebook_indices(images[:k])
hard_recons = vae.decode(codes)
images, recons = map(lambda t: t[:k], (images, recons))
images, recons, hard_recons, codes = map(lambda t: t.detach().cpu(), (images, recons, hard_recons, codes))
images, recons, hard_recons = map(lambda t: make_grid(t.float(), nrow = int(sqrt(k)), normalize = True, range = (-1, 1)), (images, recons, hard_recons))
logs = {
**logs,
'sample images': wandb.Image(images, caption = 'original images'),
'reconstructions': wandb.Image(recons, caption = 'reconstructions'),
'hard reconstructions': wandb.Image(hard_recons, caption = 'hard reconstructions'),
'codebook_indices': wandb.Histogram(codes),
'temperature': temp
}
wandb.save('./vae.pt')
save_model(f'./vae.pt')
# temperature anneal
temp = max(temp * math.exp(-ANNEAL_RATE * global_step), TEMP_MIN)
# lr decay
# Do not advance schedulers from `deepspeed_config`.
if not using_deepspeed_sched:
distr_sched.step()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
if distr_backend.is_root_worker():
if i % 10 == 0:
lr = distr_sched.get_last_lr()[0]
print(epoch, i, f'lr - {lr:6f} loss - {avg_loss.item()}')
logs = {
**logs,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item(),
'lr': lr
}
wandb.log(logs)
global_step += 1
if distr_backend.is_root_worker():
# save trained model to wandb as an artifact every epoch's end
model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
model_artifact.add_file('vae.pt')
run.log_artifact(model_artifact)
if distr_backend.is_root_worker():
# save final vae and cleanup
save_model('./vae-final.pt')
wandb.save('./vae-final.pt')
model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
model_artifact.add_file('vae-final.pt')
run.log_artifact(model_artifact)
wandb.finish()
| DALLE-pytorch-main | train_vae.py |
from setuptools import setup, find_packages
exec(open('dalle_pytorch/version.py').read())
setup(
name = 'dalle-pytorch',
packages = find_packages(),
include_package_data = True,
version = __version__,
license='MIT',
description = 'DALL-E - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/dalle-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'text-to-image'
],
install_requires=[
'axial_positional_embedding',
'DALL-E',
'einops>=0.3.2',
'ftfy',
'packaging',
'pillow',
'regex',
'rotary-embedding-torch',
'taming-transformers-rom1504',
'tokenizers',
'torch>=1.6',
'torchvision',
'transformers',
'tqdm',
'youtokentome',
'WebDataset'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| DALLE-pytorch-main | setup.py |
import argparse
from pathlib import Path
import time
from glob import glob
import os
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from dalle_pytorch import __version__
from dalle_pytorch import OpenAIDiscreteVAE, VQGanVAE, DiscreteVAE, DALLE
from dalle_pytorch import distributed_utils
from dalle_pytorch.loader import TextImageDataset
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, ChineseTokenizer, YttmTokenizer
# libraries needed for webdataset support
import webdataset as wds
from torchvision import transforms as T
from PIL import Image
from io import BytesIO
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--vae_path', type=str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type=str,
help='path to your partially trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--image_text_folder', type=str, required=True,
help='path to your folder of images and text for learning the DALL-E')
parser.add_argument('--wds', type = str, default='',
help = 'Comma separated list of WebDataset (1) image and (2) text column names. Must contain 2 values, e.g. img,cap.')
parser.add_argument('--truncate_captions', dest='truncate_captions', action='store_true',
help='Captions passed in which exceed the max token length will be truncated if this is set.')
parser.add_argument('--random_resize_crop_lower_ratio', dest='resize_ratio', type=float, default=0.75,
help='Random resized crop lower ratio')
parser.add_argument('--chinese', dest='chinese', action='store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--hug', dest='hug', action='store_true')
parser.add_argument('--bpe_path', type=str,
help='path to your BPE json file')
parser.add_argument('--dalle_output_file_name', type=str, default = "dalle",
help='output_file_name')
parser.add_argument('--fp16', action='store_true',
help='(experimental) - Enable DeepSpeed 16 bit precision. Reduces VRAM.')
parser.add_argument('--amp', action='store_true',
help='Apex "O1" automatic mixed precision. More stable than 16 bit precision. Can\'t be used in conjunction with deepspeed zero stages 1-3.')
parser.add_argument('--wandb_name', default='dalle_train_transformer',
help='Name W&B will use when saving results.\ne.g. `--wandb_name "coco2017-full-sparse"`')
parser.add_argument('--wandb_entity', default=None,
help='(optional) Name of W&B team/entity to log to.')
parser.add_argument('--stable_softmax', dest='stable_softmax', action='store_true',
help='Prevent values from becoming too large during softmax. Helps with stability in fp16 and Mixture of Quantization training.')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--flops_profiler', dest = 'flops_profiler', action='store_true', help = 'Exits after printing detailed flops/runtime analysis of forward/backward')
train_group.add_argument('--epochs', default = 20, type = int, help = 'Number of epochs')
train_group.add_argument('--save_every_n_steps', default = 1000, type = int, help = 'Save a checkpoint every n steps')
train_group.add_argument('--keep_n_checkpoints', default = None, type = int, help = '(Careful) Deletes old deepspeed checkpoints if there are more than n')
train_group.add_argument('--batch_size', default = 4, type = int, help = 'Batch size')
train_group.add_argument('--ga_steps', default = 1, type = int, help = 'Number of steps to accumulate gradients across per each iteration. DeepSpeed only.')
train_group.add_argument('--learning_rate', default = 3e-4, type = float, help = 'Learning rate')
train_group.add_argument('--clip_grad_norm', default = 0.5, type = float, help = 'Clip gradient norm')
train_group.add_argument('--lr_decay', dest = 'lr_decay', action = 'store_true')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--dim', default = 512, type = int, help = 'Model dimension')
model_group.add_argument('--text_seq_len', default = 256, type = int, help = 'Text sequence length')
model_group.add_argument('--depth', default = 2, type = int, help = 'Model depth')
model_group.add_argument('--heads', default = 8, type = int, help = 'Model number of heads')
model_group.add_argument('--dim_head', default = 64, type = int, help = 'Model head dimension')
train_group.add_argument('--ff_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')
train_group.add_argument('--attn_dropout', default = 0.0, type = float, help = 'Attention dropout.')
model_group.add_argument('--reversible', dest = 'reversible', action='store_true')
model_group.add_argument('--loss_img_weight', default = 7, type = int, help = 'Image loss weight')
model_group.add_argument('--attn_types', default = 'full', type = str, help = 'comma separated list of attention types. attention type can be: full or sparse or axial_row or axial_col or conv_like.')
model_group.add_argument('--shift_tokens', help = 'Use the shift tokens feature', action = 'store_true')
model_group.add_argument('--rotary_emb', help = 'Use rotary embeddings', action = 'store_true')
model_group.add_argument('--shared_attn_ids', default = None, type = str, help = 'Comma separated list of shared attention layer ids. Default: sharing is disabled')
model_group.add_argument('--shared_ff_ids', default = None, type = str, help = 'Comma separated list of shared feed forward layer ids. Default: sharing is disabled')
model_group.add_argument('--share_input_output_emb', help = 'Share input and output embeddings', action = 'store_true')
args = parser.parse_args()
# helpers
def exists(val):
return val is not None
def get_trainable_params(model):
return [params for params in model.parameters() if params.requires_grad]
def cp_path_to_dir(cp_path, tag):
"""Convert a checkpoint path to a directory with `tag` inserted.
If `cp_path` is already a directory, return it unchanged.
"""
if not isinstance(cp_path, Path):
cp_path = Path(cp_path)
if cp_path.is_dir():
return cp_path
path_sans_extension = cp_path.parent / cp_path.stem
cp_dir = Path(f'{path_sans_extension}-{tag}-cp')
return cp_dir
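# Illustrative example (not from the original file): cp_path_to_dir('dalle.pt', 'ds')
# yields Path('dalle-ds-cp'), while an existing directory path is returned unchanged.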
# constants
WEBDATASET_IMAGE_TEXT_COLUMNS = tuple(args.wds.split(','))
ENABLE_WEBDATASET = len(WEBDATASET_IMAGE_TEXT_COLUMNS) == 2
DALLE_OUTPUT_FILE_NAME = args.dalle_output_file_name + ".pt"
VAE_PATH = args.vae_path
VQGAN_MODEL_PATH = args.vqgan_model_path
VQGAN_CONFIG_PATH = args.vqgan_config_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
GRAD_CLIP_NORM = args.clip_grad_norm
LR_DECAY = args.lr_decay
SAVE_EVERY_N_STEPS = args.save_every_n_steps
KEEP_N_CHECKPOINTS = args.keep_n_checkpoints
MODEL_DIM = args.dim
TEXT_SEQ_LEN = args.text_seq_len
DEPTH = args.depth
HEADS = args.heads
DIM_HEAD = args.dim_head
REVERSIBLE = args.reversible
LOSS_IMG_WEIGHT = args.loss_img_weight
FF_DROPOUT = args.ff_dropout
ATTN_DROPOUT = args.attn_dropout
STABLE = args.stable_softmax
SHIFT_TOKENS = args.shift_tokens
ROTARY_EMB = args.rotary_emb
ATTN_TYPES = tuple(args.attn_types.split(','))
SHARED_ATTN_IDS = tuple(args.shared_attn_ids.split(',')) if exists(args.shared_attn_ids) else None
SHARED_FF_IDS = tuple(args.shared_ff_ids.split(',')) if exists(args.shared_ff_ids) else None
SHARE_INPUT_OUTPUT_EMB = args.share_input_output_emb
DEEPSPEED_CP_AUX_FILENAME = 'auxiliary.pt'
if not ENABLE_WEBDATASET:
# quit early if you used the wrong folder name
assert Path(args.image_text_folder).exists(), f'The path {args.image_text_folder} was not found.'
else:
# quit early if no tar files were found
if Path(args.image_text_folder).is_dir():
DATASET = [str(p) for p in Path(args.image_text_folder).glob("**/*") if ".tar" in str(p).lower()] # .name
assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(args.image_text_folder)
print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), args.image_text_folder))
elif ('http://' in args.image_text_folder.lower()) | ('https://' in args.image_text_folder.lower()):
DATASET = f"pipe:curl -L -s {args.image_text_folder} || true"
print('Found http(s) link under given path {}!'.format(args.image_text_folder))
elif 'gs://' in args.image_text_folder.lower():
DATASET = f"pipe:gsutil cat {args.image_text_folder} || true"
print('Found GCS link under given path {}!'.format(args.image_text_folder))
elif '.tar' in args.image_text_folder:
DATASET = args.image_text_folder
print('Found WebDataset .tar(.gz) file under given path {}!'.format(args.image_text_folder))
else:
raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(args.image_text_folder))
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
is_root = distr_backend.is_root_worker()
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
if using_deepspeed:
cp_dir = cp_path_to_dir(dalle_path, 'ds')
assert cp_dir.is_dir(), \
f'DeepSpeed checkpoint directory {cp_dir} not found'
dalle_path = cp_dir / DEEPSPEED_CP_AUX_FILENAME
else:
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path), map_location='cpu')
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
opt_state = loaded_obj.get('opt_state')
scheduler_state = loaded_obj.get('scheduler_state')
if vae_params is not None:
vae = DiscreteVAE(**vae_params)
elif args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
resume_epoch = loaded_obj.get('epoch', 0)
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
assert not vae_path.is_dir(), \
('Cannot load VAE model from directory; please use a '
'standard *.pt checkpoint. '
'Currently, merging a DeepSpeed-partitioned VAE into a DALLE '
'model is not supported.')
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
if is_root:
print('using pretrained VAE for encoding images to tokens')
vae_params = None
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
dalle_params = dict(
num_text_tokens=tokenizer.vocab_size,
text_seq_len=TEXT_SEQ_LEN,
dim=MODEL_DIM,
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD,
reversible=REVERSIBLE,
loss_img_weight=LOSS_IMG_WEIGHT,
attn_types=ATTN_TYPES,
ff_dropout=FF_DROPOUT,
attn_dropout=ATTN_DROPOUT,
stable=STABLE,
shift_tokens=SHIFT_TOKENS,
rotary_emb=ROTARY_EMB,
shared_attn_ids=SHARED_ATTN_IDS,
shared_ff_ids=SHARED_FF_IDS,
share_input_output_emb=SHARE_INPUT_OUTPUT_EMB,
)
resume_epoch = 0
IMAGE_SIZE = vae.image_size
CHANNELS = vae.channels
TRANSPARENT = CHANNELS == 4
IMAGE_MODE = 'RGBA' if CHANNELS == 4 else 'RGB'
# configure OpenAI VAE for float16s
if isinstance(vae, OpenAIDiscreteVAE) and args.fp16:
vae.enc.blocks.output.conv.use_float16 = True
# helpers
def group_weight(model):
group_decay, group_no_decay = [], []
for params in model.named_parameters():
if 'transformer' in params[0]:
if 'bias' in params[0] or 'norm' in params[0]:
group_no_decay.append(params[1])
continue
group_decay.append(params[1])
assert len(list(model.parameters())) == len(group_decay) + len(group_no_decay)
groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
return groups
# create dataset and dataloader
is_shuffle = not distributed_utils.using_backend(distributed_utils.HorovodBackend)
imagepreproc = T.Compose([
T.Lambda(lambda img: img.convert(IMAGE_MODE)
if img.mode != IMAGE_MODE else img),
T.RandomResizedCrop(IMAGE_SIZE,
scale=(args.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor(),
])
def imagetransform(b):
return Image.open(BytesIO(b))
def tokenize(s):
return tokenizer.tokenize(
s.decode('utf-8'),
TEXT_SEQ_LEN,
truncate_text=args.truncate_captions).squeeze(0)
if ENABLE_WEBDATASET:
DATASET_SIZE = int(1e9) # You need to set a nominal length for the Dataset in order to avoid warnings from DataLoader
myimg, mycap = WEBDATASET_IMAGE_TEXT_COLUMNS
image_text_mapping = {
myimg: imagetransform,
mycap: tokenize
}
image_mapping = {
myimg: imagepreproc
}
def filter_dataset(item): # e.g. for C@H, which (rarely) has no caption available.
if mycap not in item:
return False
if myimg not in item:
return False
return True
w_dataset = wds.WebDataset(DATASET, handler=wds.warn_and_continue)
filtered_dataset = w_dataset.select(filter_dataset)
ds = filtered_dataset.map_dict(**image_text_mapping).map_dict(**image_mapping).to_tuple(mycap, myimg).batched(BATCH_SIZE // distr_backend.get_world_size(), partial=True)
else:
ds = TextImageDataset(
args.image_text_folder,
text_len=TEXT_SEQ_LEN,
image_size=IMAGE_SIZE,
transparent=TRANSPARENT,
resize_ratio=args.resize_ratio,
truncate_captions=args.truncate_captions,
tokenizer=tokenizer,
shuffle=is_shuffle,
)
assert len(ds) > 0, 'dataset is empty'
if is_root:
if not ENABLE_WEBDATASET:
print(f'{len(ds)} image-text pairs found for training')
# data sampler
data_sampler = None
if not is_shuffle:
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds,
num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank()
)
# WebLoader for WebDataset and DeepSpeed compatibility
if ENABLE_WEBDATASET:
dl = wds.WebLoader(ds, batch_size=None, shuffle=False, num_workers=4) # num_workers can be tuned for throughput
number_of_batches = DATASET_SIZE // (BATCH_SIZE * distr_backend.get_world_size())
dl = dl.slice(number_of_batches)
dl.length = number_of_batches
else:
# Regular DataLoader for image-text-folder datasets
dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=is_shuffle, drop_last=True, sampler=data_sampler)
# initialize DALL-E
dalle = DALLE(vae=vae, **dalle_params)
if not using_deepspeed:
if args.fp16:
dalle = dalle.half()
dalle = dalle.cuda()
if RESUME and not using_deepspeed:
dalle.load_state_dict(weights)
# optimizer
opt = Adam(get_trainable_params(dalle), lr=LEARNING_RATE)
if RESUME and opt_state:
opt.load_state_dict(opt_state)
# scheduler
scheduler = None
if LR_DECAY:
scheduler = ReduceLROnPlateau(
opt,
mode="min",
factor=0.5,
patience=10,
cooldown=10,
min_lr=1e-6,
verbose=True,
)
if RESUME and scheduler_state:
scheduler.load_state_dict(scheduler_state)
# experiment tracker
if is_root:
model_config = dict(
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD
)
run = wandb.init(
project=args.wandb_name,
entity=args.wandb_entity,
resume=False,
config=model_config,
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {
'train_batch_size': BATCH_SIZE,
'gradient_accumulation_steps': args.ga_steps,
'gradient_clipping': GRAD_CLIP_NORM,
'fp16': {
'enabled': args.fp16,
},
'amp': {
'enabled': args.amp,
'opt_level': 'O1',
},
"flops_profiler": {
"enabled": args.flops_profiler,
"profile_step": 200,
"module_depth": -1,
"top_modules": 1,
"detailed": True,
"output_file": None # TODO Can't get this to work.
},
}
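# Note: a 'zero_optimization' section can be added to this dict to enable ZeRO; the check
# below only warns about checkpoint portability when stage >= 2.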
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
print(f"Checkpoints made with DeepSpeed ZeRO Stages 2 and 3 will be stored in deepspeed checkpoint folder")
print(f"As such, they will require DeepSpeed as a dependency in order to resume from or generate with.")
print("See the deespeed conversion script for details on how to convert your ZeRO stage 2/3 checkpoint to a single file.")
print("If using a single GPU, consider running with apex automatic mixed precision instead for a similar speedup to ZeRO.")
time.sleep(2)
(distr_dalle, distr_opt, distr_dl, distr_scheduler) = distr_backend.distribute(
args=args,
model=dalle,
optimizer=opt,
model_parameters=get_trainable_params(dalle),
training_data=(
(None if ENABLE_WEBDATASET else ds)
if using_deepspeed
else dl
),
# Do not pass the LR scheduler to DeepSpeed so we can manually
# advance it.
lr_scheduler=scheduler if LR_DECAY and not using_deepspeed else None,
config_params=deepspeed_config,
)
# Prefer scheduler in `deepspeed_config`.
if LR_DECAY and distr_scheduler is None:
distr_scheduler = scheduler
avoid_model_calls = using_deepspeed and args.fp16
if RESUME and using_deepspeed:
distr_dalle.load_checkpoint(str(cp_dir))
def save_model(path, epoch=0):
save_obj = {
'hparams': dalle_params,
'vae_params': vae_params,
'epoch': epoch,
'version': __version__,
'vae_class_name': vae.__class__.__name__
}
if using_deepspeed:
cp_dir = cp_path_to_dir(path, 'ds')
if KEEP_N_CHECKPOINTS is not None and is_root:
checkpoints = sorted(glob(str(cp_dir / "global*")), key=os.path.getmtime, reverse=True)
for checkpoint in checkpoints[KEEP_N_CHECKPOINTS:]:
shutil.rmtree(checkpoint)
distr_dalle.save_checkpoint(cp_dir, client_state=save_obj)
if not is_root:
return
# Save auxiliary values so we can reuse the standard routine
# for loading.
save_obj = {
**save_obj,
# Save a nonsense value that directs the user to
# further help.
'weights': (
'To get a working standard checkpoint, '
'look into consolidating DeepSpeed checkpoints.'
),
}
torch.save(save_obj, str(cp_dir / DEEPSPEED_CP_AUX_FILENAME))
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2: # see https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
return
if not is_root:
return
save_obj = {
**save_obj,
'weights': dalle.state_dict(),
'opt_state': opt.state_dict(),
'scheduler_state': (scheduler.state_dict() if scheduler else None)
}
torch.save(save_obj, path)
def save_artifact(model_config, model_path, name = 'trained-dalle'):
model_artifact = wandb.Artifact(name, type='model', metadata=dict(model_config))
model_artifact.add_file(model_path)
run.log_artifact(model_artifact)
# training
# Saves a checkpoint before training begins to fail early when mis-configured.
# See https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
save_model(DALLE_OUTPUT_FILE_NAME, epoch=resume_epoch)
for epoch in range(resume_epoch, EPOCHS):
if data_sampler:
data_sampler.set_epoch(epoch)
for i, (text, images) in enumerate((dl if ENABLE_WEBDATASET else distr_dl)):
if i % 10 == 0 and is_root:
t = time.time()
if args.fp16:
images = images.half()
text, images = map(lambda t: t.cuda(), (text, images))
loss = distr_dalle(text, images, return_loss=True)
if using_deepspeed:
distr_dalle.backward(loss)
distr_dalle.step()
# Gradients are automatically zeroed after the step
else:
loss.backward()
clip_grad_norm_(distr_dalle.parameters(), GRAD_CLIP_NORM)
distr_opt.step()
distr_opt.zero_grad()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
log = {}
if i % 10 == 0 and is_root:
print(epoch, i, f'loss - {avg_loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item()
}
if i % SAVE_EVERY_N_STEPS == 0:
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if i % 100 == 0 and is_root:
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
if not avoid_model_calls:
# CUDA index errors when we don't guard this
image = dalle.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9
if not avoid_model_calls:
log['image'] = wandb.Image(image, caption=decoded_text)
if i % 10 == 9 and is_root:
sample_per_sec = BATCH_SIZE * 10 / (time.time() - t)
log["sample_per_sec"] = sample_per_sec
print(epoch, i, f'sample_per_sec - {sample_per_sec}')
if i == 201 and args.flops_profiler:
raise StopIteration("Profiler has finished running. Stopping training early.")
if is_root:
wandb.log(log)
if LR_DECAY:
distr_scheduler.step(avg_loss)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if is_root:
# save trained model to wandb as an artifact every epoch's end
save_artifact(model_config, DALLE_OUTPUT_FILE_NAME)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if is_root:
wandb.save(DALLE_OUTPUT_FILE_NAME)
save_artifact(model_config, DALLE_OUTPUT_FILE_NAME)
wandb.finish()
| DALLE-pytorch-main | train_dalle.py |
from inspect import isfunction
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from rotary_embedding_torch import apply_rotary_emb
# helpers
def exists(val):
return val is not None
def uniq(arr):
return {el: True for el in arr}.keys()
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def max_neg_value(t):
return -torch.finfo(t.dtype).max
def stable_softmax(t, dim = -1, alpha = 32 ** 2):
t = t / alpha
t = t - torch.amax(t, dim = dim, keepdim = True).detach()
return (t * alpha).softmax(dim = dim)
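# Note: dividing by alpha keeps the max-subtraction in a numerically safe range for fp16;
# multiplying by alpha again before the softmax restores the original logits (softmax is
# shift-invariant), so the output matches a plain softmax when no overflow occurs.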
def apply_pos_emb(pos_emb, qkv):
n = qkv[0].shape[-2]
pos_emb = pos_emb[..., :n, :]
return tuple(map(lambda t: apply_rotary_emb(pos_emb, t), qkv))
# classes
class Attention(nn.Module):
def __init__(self, dim, seq_len, causal = True, heads = 8, dim_head = 64, dropout = 0., stable = False,
static_mask = None):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.seq_len = seq_len
self.scale = dim_head ** -0.5
self.stable = stable
self.causal = causal
self.register_buffer('static_mask', static_mask, persistent=False)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, mask = None, rotary_pos_emb = None, cache = None, cache_key = None):
b, n, _, h, device = *x.shape, self.heads, x.device
softmax = torch.softmax if not self.stable else stable_softmax
offset = cache.get('offset', 0) if exists(cache) else 0
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
if exists(rotary_pos_emb):
q, k, v = apply_pos_emb(rotary_pos_emb[..., offset:, :], (q, k, v))
q = q * self.scale
if offset > 0:
k_top, v_top = cache[cache_key]
k = torch.cat([k_top, k], dim=-2)
v = torch.cat([v_top, v], dim=-2)
if exists(cache):
cache[cache_key] = k, v
dots = torch.einsum('b h i d, b h j d -> b h i j', q, k)
mask_value = max_neg_value(dots)
if exists(mask):
mask = rearrange(mask, 'b j -> b () () j')
dots.masked_fill_(~mask, mask_value)
del mask
if self.causal and offset == 0: # causality is naturally enforced for the cached inference
i, j = dots.shape[-2:]
mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
dots.masked_fill_(mask, mask_value)
if exists(self.static_mask):
dots.masked_fill_(~self.static_mask[offset:offset + n, :offset + n], mask_value)
attn = softmax(dots, dim=-1)
out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
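# Minimal shape sketch for the full attention above (illustrative, values are assumptions):
# attn = Attention(dim = 512, seq_len = 1024, heads = 8, dim_head = 64)
# out = attn(torch.randn(2, 1024, 512)) # -> (2, 1024, 512)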
# sparse attention with convolutional pattern, as mentioned in the OpenAI DALL-E blog post. customizable kernel size and dilation
class SparseConvCausalAttention(nn.Module):
def __init__(self, dim, seq_len, image_size = 32, kernel_size = 5, dilation = 1, heads = 8, dim_head = 64, dropout = 0., stable = False, **kwargs):
super().__init__()
assert kernel_size % 2 == 1, 'kernel size must be odd'
inner_dim = dim_head * heads
self.seq_len = seq_len
self.heads = heads
self.scale = dim_head ** -0.5
self.image_size = image_size
self.kernel_size = kernel_size
self.dilation = dilation
self.stable = stable
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, mask = None, rotary_pos_emb = None):
b, n, _, h, img_size, kernel_size, dilation, seq_len, device = *x.shape, self.heads, self.image_size, self.kernel_size, self.dilation, self.seq_len, x.device
softmax = torch.softmax if not self.stable else stable_softmax
img_seq_len = img_size ** 2
text_len = seq_len + 1 - img_seq_len
# padding
padding = seq_len - n + 1
mask = default(mask, lambda: torch.ones(b, text_len, device = device).bool())
x = F.pad(x, (0, 0, 0, padding), value = 0)
mask = mask[:, :text_len]
# derive query / keys / values
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)
if exists(rotary_pos_emb):
q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))
q *= self.scale
((q_text, q_img), (k_text, k_img), (v_text, v_img)) = map(lambda t: (t[:, :-img_seq_len], t[:, -img_seq_len:]), (q, k, v))
# text attention
dots_text = einsum('b i d, b j d -> b i j', q_text, k_text)
mask_value = max_neg_value(dots_text)
i, j = dots_text.shape[-2:]
text_causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
dots_text.masked_fill_(text_causal_mask, mask_value)
attn_text = softmax(dots_text, dim = -1)
out_text = einsum('b i j, b j d -> b i d', attn_text, v_text)
# image attention
effective_kernel_size = (kernel_size - 1) * dilation + 1
same_padding = effective_kernel_size // 2
causal_padding = (same_padding * 2, 0, same_padding * 2, 0)
k_img, v_img = map(lambda t: rearrange(t, 'b (h w) c -> b c h w', h = img_size), (k_img, v_img))
k_img, v_img = map(lambda t: F.pad(t, causal_padding), (k_img, v_img))
k_img, v_img = map(lambda t: F.unfold(t, kernel_size, dilation = dilation), (k_img, v_img))
k_img, v_img = map(lambda t: rearrange(t, 'b (d j) i -> b i j d', j = kernel_size ** 2), (k_img, v_img))
# let image attend to all of text
dots_image = einsum('b i d, b i j d -> b i j', q_img, k_img)
dots_image_to_text = einsum('b i d, b j d -> b i j', q_img, k_text)
# use padding of 0 on tensor of 1s and unfold for padding mask
i, j = dots_image.shape[-2:]
ones = torch.ones((img_seq_len,), device = device)
ones = rearrange(ones, '(h w) -> () () h w', h = img_size)
ones = F.pad(ones, causal_padding, value = 0.)
ones = F.unfold(ones, kernel_size, dilation = dilation)
ones = rearrange(ones, 'b j i -> b i j')
# mask image attention
padding_mask = ones == 0.
# concat text mask with image causal mask
padding_mask = repeat(padding_mask, '() i j -> b i j', b = b * h)
mask = repeat(mask, 'b j -> (b h) i j', i = i, h = h)
mask = torch.cat((~mask, padding_mask), dim = -1)
# image can attend to all of text
dots = torch.cat((dots_image_to_text, dots_image), dim = -1)
dots.masked_fill_(mask, mask_value)
attn = softmax(dots, dim = -1)
# aggregate
attn_image_to_text, attn_image = attn[..., :text_len], attn[..., text_len:]
out_image_to_image = einsum('b i j, b i j d -> b i d', attn_image, v_img)
out_image_to_text = einsum('b i j, b j d -> b i d', attn_image_to_text, v_text)
out_image = out_image_to_image + out_image_to_text
# combine attended values for both text and image
out = torch.cat((out_text, out_image), dim = 1)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
out = self.to_out(out)
return out[:, :n]
# sparse axial causal attention
class SparseAxialCausalAttention(nn.Module):
def __init__(self, dim, seq_len, image_size = 32, axis = 0, heads = 8, dim_head = 64, dropout = 0., stable = False, **kwargs):
super().__init__()
assert axis in {0, 1}, 'axis must be either 0 (along height) or 1 (along width)'
self.axis = axis
inner_dim = dim_head * heads
self.seq_len = seq_len
self.heads = heads
self.scale = dim_head ** -0.5
self.image_size = image_size
self.stable = stable
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, mask = None, rotary_pos_emb = None):
b, n, _, h, img_size, axis, seq_len, device = *x.shape, self.heads, self.image_size, self.axis, self.seq_len, x.device
softmax = torch.softmax if not self.stable else stable_softmax
img_seq_len = img_size ** 2
text_len = seq_len + 1 - img_seq_len
# padding
padding = seq_len - n + 1
mask = default(mask, lambda: torch.ones(b, text_len, device = device).bool())
x = F.pad(x, (0, 0, 0, padding), value = 0)
mask = mask[:, :text_len]
# derive queries / keys / values
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)
if exists(rotary_pos_emb):
q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))
q *= self.scale
((q_text, q_img), (k_text, k_img), (v_text, v_img)) = map(lambda t: (t[:, :-img_seq_len], t[:, -img_seq_len:]), (q, k, v))
# text attention
dots_text = einsum('b i d, b j d -> b i j', q_text, k_text)
mask_value = max_neg_value(dots_text)
i, j = dots_text.shape[-2:]
text_causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
dots_text.masked_fill_(text_causal_mask, mask_value)
attn_text = softmax(dots_text, dim = -1)
out_text = einsum('b i j, b j d -> b i d', attn_text, v_text)
# image attention
split_axis_einops = 'b (h w) c -> b h w c' if axis == 0 else 'b (h w) c -> b w h c'
merge_axis_einops = 'b x n d -> b (x n) d' if axis == 0 else 'b x n d -> b (n x) d'
# split out axis
q_img, k_img, v_img = map(lambda t: rearrange(t, split_axis_einops, h = img_size), (q_img, k_img, v_img))
# similarity
dots_image_to_image = einsum('b x i d, b x j d -> b x i j', q_img, k_img)
dots_image_to_text = einsum('b x i d, b j d -> b x i j', q_img, k_text)
dots = torch.cat((dots_image_to_text, dots_image_to_image), dim = -1)
# mask so image has full attention to text, but causal along axis
bh, x, i, j = dots.shape
causal_mask = torch.ones(i, img_size, device = device).triu_(img_size - i + 1).bool()
causal_mask = repeat(causal_mask, 'i j -> b x i j', b = bh, x = x)
mask = repeat(mask, 'b j -> (b h) x i j', h = h, x = x, i = i)
mask = torch.cat((~mask, causal_mask), dim = -1)
dots.masked_fill_(mask, mask_value)
# attention.
attn = softmax(dots, dim = -1)
# aggregate
attn_image_to_text, attn_image_to_image = attn[..., :text_len], attn[..., text_len:]
out_image_to_image = einsum('b x i j, b x j d -> b x i d', attn_image_to_image, v_img)
out_image_to_text = einsum('b x i j, b j d -> b x i d', attn_image_to_text, v_text)
out_image = out_image_to_image + out_image_to_text
# merge back axis
out_image = rearrange(out_image, merge_axis_einops, x = img_size)
# combine attended values for both text and image
out = torch.cat((out_text, out_image), dim = 1)
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
out = self.to_out(out)
return out[:, :n]
# microsoft sparse attention CUDA kernel
class SparseAttention(Attention):
def __init__(
self,
*args,
block_size = 16,
text_seq_len = 256,
num_random_blocks = None,
**kwargs
):
super().__init__(*args, **kwargs)
from deepspeed.ops.sparse_attention import SparseSelfAttention, VariableSparsityConfig
self.block_size = block_size
num_random_blocks = default(num_random_blocks, self.seq_len // block_size // 4)
global_block_indices = list(range(ceil(text_seq_len / block_size)))
self.attn_fn = SparseSelfAttention(
sparsity_config = VariableSparsityConfig(
num_heads = self.heads,
block = self.block_size,
num_random_blocks = num_random_blocks,
global_block_indices = global_block_indices,
attention = 'unidirectional' if self.causal else 'bidirectional'
),
max_seq_length = self.seq_len,
attn_mask_mode = 'add'
)
def forward(self, x, mask = None, rotary_pos_emb = None):
b, n, _, h, device = *x.shape, self.heads, x.device
remainder = n % self.block_size
mask = default(mask, lambda: torch.ones(b, n, device = device).bool())
if remainder > 0:
padding = self.block_size - remainder
x = F.pad(x, (0, 0, 0, padding), value = 0)
mask = F.pad(mask, (0, padding), value = False)
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
if exists(rotary_pos_emb):
q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))
key_pad_mask = None
if exists(mask):
key_pad_mask = ~mask
attn_mask = None
if self.causal:
i, j = q.shape[-2], k.shape[-2]
mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
attn_mask = torch.zeros(i, j, device = device).to(q)
mask_value = max_neg_value(q) / 2
attn_mask.masked_fill_(mask, mask_value)
out = self.attn_fn(q, k, v, attn_mask = attn_mask, key_padding_mask = key_pad_mask)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out[:, :n]
| DALLE-pytorch-main | dalle_pytorch/attention.py |
__version__ = '1.6.6'
| DALLE-pytorch-main | dalle_pytorch/version.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
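# The coupling above is invertible: since y1 = x1 + f(x2) and y2 = x2 + g(y1), backward_pass
# recovers x2 = y2 - g(y1) and x1 = y1 - f(x2) instead of storing activations, re-running
# f and g under the recorded RNG state so that dropout matches the forward pass.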
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}, layer_dropout = 0.):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
self.layer_dropout = layer_dropout
def forward(self, x, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
for (f, g), (f_args, g_args) in layers_and_args:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}):
super().__init__()
self.args_route = args_route
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).mean(dim=0)
| DALLE-pytorch-main | dalle_pytorch/reversible.py |
from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from dalle_pytorch.transformer import Transformer, DivideMax
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class always():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return self.val
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
def prob_mask_like(shape, prob, device):
return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / temperature) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
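# e.g. with the default thres = 0.5 only the top half of the logits are kept; thres = 0.9
# keeps roughly the top 10%, and everything else is set to -inf before gumbel sampling.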
class SharedEmbedding(nn.Embedding):
def __init__(self, linear, start_index, end_index, **kwargs):
super().__init__(end_index - start_index, linear.weight.shape[1], **kwargs)
del self.weight
self.linear = linear
self.start_index = start_index
self.end_index = end_index
def forward(self, input):
return F.embedding(
input, self.linear.weight[self.start_index:self.end_index], self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(nn.Module):
def __init__(
self,
image_size = 256,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
reinmax = False,
kl_div_loss_weight = 0.,
normalization = ((*((0.5,) * 3), 0), (*((0.5,) * 3), 1))
):
super().__init__()
assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.channels = channels
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.reinmax = reinmax
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = tuple(map(lambda t: t[:channels], normalization))
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
img = self.norm(img)
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
if self.straight_through and self.reinmax:
# use reinmax for better second-order accuracy - https://arxiv.org/abs/2304.08612
# algorithm 2
one_hot = one_hot.detach()
π0 = logits.softmax(dim = 1)
π1 = (one_hot + (logits / temp).softmax(dim = 1)) / 2
π1 = ((log(π1) - logits).detach() + logits).softmax(dim = 1)
π2 = 2 * π1 - 0.5 * π0
one_hot = π2 - π2.detach() + one_hot
sampled = einsum('b n h w, n d -> b d h w', one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
log_qy = F.log_softmax(logits, dim = -1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
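# Illustrative VAE pretraining step (assumed sizes, mirroring the defaults above):
# vae = DiscreteVAE(image_size = 256, num_tokens = 512, codebook_dim = 512, num_layers = 3)
# loss = vae(torch.randn(4, 3, 256, 256), return_loss = True)
# loss.backward()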
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim_text = 512,
dim_image = 512,
dim_latent = 512,
num_text_tokens = 10000,
text_enc_depth = 6,
text_seq_len = 256,
text_heads = 8,
num_visual_tokens = 512,
visual_enc_depth = 6,
visual_heads = 8,
visual_image_size = 256,
visual_patch_size = 32,
channels = 3
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads, rotary_emb = False)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (visual_image_size // visual_patch_size) ** 2
patch_dim = channels * visual_patch_size ** 2
self.visual_patch_size = visual_patch_size
self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads, rotary_emb = False)
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
self.temperature = nn.Parameter(torch.tensor(1.))
def forward(
self,
text,
image,
text_mask = None,
return_loss = False
):
b, device, p = text.shape[0], text.device, self.visual_patch_size
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
image_emb = self.to_visual_embedding(image_patches)
image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))
enc_text = self.text_transformer(text_emb, mask = text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim = 1)
else:
text_latents = enc_text.mean(dim = 1)
image_latents = enc_image.mean(dim = 1)
text_latents = self.to_text_latent(text_latents)
image_latents = self.to_visual_latent(image_latents)
text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))
temp = self.temperature.exp()
if not return_loss:
sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
return sim
sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
labels = torch.arange(b, device = device)
loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
return loss
# main DALL-E class
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
vae,
num_text_tokens = 10000,
text_seq_len = 256,
depth,
heads = 8,
dim_head = 64,
reversible = False,
attn_dropout = 0.,
ff_dropout = 0,
sparse_attn = False,
attn_types = None,
loss_img_weight = 7,
stable = False,
sandwich_norm = False,
shift_tokens = True,
rotary_emb = True,
shared_attn_ids = None,
shared_ff_ids = None,
share_input_output_emb = False,
optimize_for_inference = False,
):
super().__init__()
assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE, OpenAIDiscreteVAE, or VQGanVAE'
image_size = vae.image_size
num_image_tokens = vae.num_tokens
image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
image_seq_len = image_fmap_size ** 2
num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) if not rotary_emb else always(0) # +1 for <bos>
self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_fmap_size, image_fmap_size)) if not rotary_emb else always(0)
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.num_image_tokens = num_image_tokens
self.text_seq_len = text_seq_len
self.image_seq_len = image_seq_len
seq_len = text_seq_len + image_seq_len
total_tokens = num_text_tokens + num_image_tokens
self.total_tokens = total_tokens
self.total_seq_len = seq_len
self.vae = vae
set_requires_grad(self.vae, False) # freeze VAE from being trained
self.transformer = Transformer(
dim = dim,
causal = True,
seq_len = seq_len,
depth = depth,
heads = heads,
dim_head = dim_head,
reversible = reversible,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
attn_types = attn_types,
image_fmap_size = image_fmap_size,
sparse_attn = sparse_attn,
stable = stable,
sandwich_norm = sandwich_norm,
shift_tokens = shift_tokens,
rotary_emb = rotary_emb,
shared_attn_ids = shared_attn_ids,
shared_ff_ids = shared_ff_ids,
optimize_for_inference = optimize_for_inference,
)
self.stable = stable
if stable:
self.norm_by_max = DivideMax(dim = -1)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
if share_input_output_emb:
self.text_emb = SharedEmbedding(self.to_logits[1], 0, num_text_tokens)
self.image_emb = SharedEmbedding(self.to_logits[1], num_text_tokens, total_tokens)
else:
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
seq_range = torch.arange(seq_len)
logits_range = torch.arange(total_tokens)
seq_range = rearrange(seq_range, 'n -> () n ()')
logits_range = rearrange(logits_range, 'd -> () () d')
logits_mask = (
((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
)
self.register_buffer('logits_mask', logits_mask, persistent=False)
self.loss_img_weight = loss_img_weight
@torch.no_grad()
@eval_decorator
def generate_texts(
self,
tokenizer,
text = None,
*,
filter_thres = 0.5,
temperature = 1.
):
text_seq_len = self.text_seq_len
if text is None or text == "":
text_tokens = torch.tensor([[0]]).cuda()
else:
text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
for _ in range(text_tokens.shape[1], text_seq_len):
device = text_tokens.device
tokens = self.text_emb(text_tokens)
tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device = device))
seq_len = tokens.shape[1]
output_transf = self.transformer(tokens)
if self.stable:
output_transf = self.norm_by_max(output_transf)
logits = self.to_logits(output_transf)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
text_tokens = torch.cat((text_tokens, sample[:, None]), dim=-1)
padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
return text_tokens, texts
@torch.no_grad()
@eval_decorator
def generate_images(
self,
text,
*,
clip = None,
filter_thres = 0.5,
temperature = 1.,
img = None,
num_init_img_tokens = None,
cond_scale = 1.,
use_cache = False,
):
vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
total_len = text_seq_len + image_seq_len
text = text[:, :text_seq_len] # make sure text is within bounds
out = text
if exists(img):
image_size = vae.image_size
assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[3] == image_size, f'input image must have the correct image size {image_size}'
indices = vae.get_codebook_indices(img)
num_img_tokens = default(num_init_img_tokens, int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
indices = indices[:, :num_img_tokens]
out = torch.cat((out, indices), dim = -1)
prev_cache = None
cache = {} if use_cache else None
for cur_len in range(out.shape[1], total_len):
is_image = cur_len >= text_seq_len
text, image = out[:, :text_seq_len], out[:, text_seq_len:]
logits = self.forward_with_cond_scale(text, image, cond_scale = cond_scale, cache = cache)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
out = torch.cat((out, sample[:, None]), dim=-1)
text_seq = out[:, :text_seq_len]
img_seq = out[:, -image_seq_len:]
images = vae.decode(img_seq)
if exists(clip):
scores = clip(text_seq, images, return_loss = False)
return images, scores
return images
def forward_with_cond_scale(self, *args, cond_scale = 1, cache = None, **kwargs):
if cond_scale == 1:
return self(*args, **kwargs)
prev_cache = cache.copy() if exists(cache) else None
logits = self(*args, cache = cache, **kwargs)
# discovery by Katherine Crowson
# https://twitter.com/RiversHaveWings/status/1478093658716966912
null_cond_logits = self(*args, null_cond_prob = 1., cache = prev_cache, **kwargs)
return null_cond_logits + (logits - null_cond_logits) * cond_scale
def forward(
self,
text,
image = None,
return_loss = False,
null_cond_prob = 0.,
cache = None,
):
assert text.shape[-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
batch, device, total_seq_len = text.shape[0], text.device, self.total_seq_len
# randomly remove text condition with <null_cond_prob> probability
if null_cond_prob > 0:
null_mask = prob_mask_like((batch,), null_cond_prob, device = device)
text *= rearrange(~null_mask, 'b -> b 1')
# make sure padding in text tokens get unique padding token id
text_range = torch.arange(self.text_seq_len, device = device) + (self.num_text_tokens - self.text_seq_len)
text = torch.where(text == 0, text_range, text)
# add <bos>
text = F.pad(text, (1, 0), value = 0)
tokens = self.text_emb(text)
tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))
seq_len = tokens.shape[1]
if exists(image) and not is_empty(image):
is_raw_image = len(image.shape) == 4
if is_raw_image:
image_size = self.vae.image_size
channels = self.vae.channels
assert tuple(image.shape[1:]) == (channels, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
image = self.vae.get_codebook_indices(image)
image_len = image.shape[1]
image_emb = self.image_emb(image)
image_emb += self.image_pos_emb(image_emb)
tokens = torch.cat((tokens, image_emb), dim = 1)
seq_len += image_len
# when training, if the length exceeds the total text + image length
# remove the last token, since it needs not to be trained
if tokens.shape[1] > total_seq_len:
seq_len -= 1
tokens = tokens[:, :-1]
if self.stable:
alpha = 0.1
tokens = tokens * alpha + tokens.detach() * (1 - alpha)
if exists(cache) and cache.get('offset'):
tokens = tokens[:, -1:]
out = self.transformer(tokens, cache=cache)
if self.stable:
out = self.norm_by_max(out)
logits = self.to_logits(out)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
if exists(cache) and cache.get('offset'):
logits_mask = logits_mask[:, -1:]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
if exists(cache):
cache['offset'] = cache.get('offset', 0) + logits.shape[1]
if not return_loss:
return logits
assert exists(image), 'when training, image must be supplied'
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text[:, 1:], offsetted_image), dim = 1)
logits = rearrange(logits, 'b n c -> b c n')
loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
return loss
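# Illustrative end-to-end sketch (assumed hyperparameters, not part of the original file):
# exercises the text-to-image forward pass and loss on random tensors.
if __name__ == '__main__':
    vae = DiscreteVAE(image_size = 256, num_layers = 3, num_tokens = 512, codebook_dim = 512, hidden_dim = 64)
    dalle = DALLE(dim = 512, vae = vae, num_text_tokens = 10000, text_seq_len = 256, depth = 2, heads = 8)
    text = torch.randint(0, 10000, (2, 256))
    images = torch.randn(2, 3, 256, 256)
    loss = dalle(text, images, return_loss = True)
    loss.backward()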
| DALLE-pytorch-main | dalle_pytorch/dalle_pytorch.py |
from dalle_pytorch.dalle_pytorch import DALLE, CLIP, DiscreteVAE
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from pkg_resources import get_distribution
from dalle_pytorch.version import __version__
| DALLE-pytorch-main | dalle_pytorch/__init__.py |
# taken from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import torch
import youtokentome as yttm
from tokenizers import Tokenizer
from tokenizers.processors import ByteLevel
from transformers import BertTokenizer
import html
import os
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
# OpenAI simple tokenizer
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + '</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.vocab_size = 49408
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens, remove_start_end = True, pad_tokens = set()):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
if remove_start_end:
tokens = [token for token in tokens if token not in (49406, 49407, 0)]
text = ''.join([self.decoder[token] for token in tokens if token not in pad_tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def tokenize(self, texts, context_length = 256, truncate_text = False):
if isinstance(texts, str):
texts = [texts]
all_tokens = [self.encode(text) for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate_text:
tokens = tokens[:context_length]
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
tokenizer = SimpleTokenizer()
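# Illustrative round trip with the default tokenizer (not from the original file):
# ids = tokenizer.tokenize('a painting of a fox', context_length = 256, truncate_text = True)
# caption = tokenizer.decode(ids[0])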
# huggingface tokenizer
class HugTokenizer:
def __init__(self, bpe_path = None):
bpe_path = Path(bpe_path)
assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
tokenizer = Tokenizer.from_file(str(bpe_path))
tokenizer.post_processor = ByteLevel(trim_offsets = True)
self.tokenizer = tokenizer
self.vocab_size = tokenizer.get_vocab_size()
def decode(self, tokens, pad_tokens = set()):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
ignore_ids = pad_tokens.union({0})
tokens = [token for token in tokens if token not in ignore_ids]
return self.tokenizer.decode(tokens, skip_special_tokens = True)
def encode(self, text):
return self.tokenizer.encode(text).ids
def tokenize(self, texts, context_length = 256, truncate_text = False):
if isinstance(texts, str):
texts = [texts]
all_tokens = [self.encode(text) for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate_text:
tokens = tokens[:context_length]
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
# chinese tokenizer
class ChineseTokenizer:
def __init__(self):
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
self.tokenizer = tokenizer
self.vocab_size = tokenizer.vocab_size
def decode(self, tokens, pad_tokens = set()):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
ignore_ids = pad_tokens.union({0})
tokens = [token for token in tokens if token not in ignore_ids]
return self.tokenizer.decode(tokens)
def encode(self, text):
return torch.tensor(self.tokenizer.encode(text, add_special_tokens = False))
def tokenize(self, texts, context_length = 256, truncate_text = False):
if isinstance(texts, str):
texts = [texts]
all_tokens = [self.encode(text) for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate_text:
tokens = tokens[:context_length]
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
# yttm tokenizer
class YttmTokenizer:
def __init__(self, bpe_path = None):
bpe_path = Path(bpe_path)
assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
tokenizer = yttm.BPE(model = str(bpe_path))
self.tokenizer = tokenizer
self.vocab_size = tokenizer.vocab_size()
def decode(self, tokens, pad_tokens = set()):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
return self.tokenizer.decode(tokens, ignore_ids = pad_tokens.union({0}))
def encode(self, texts):
encoded = self.tokenizer.encode(texts, output_type = yttm.OutputType.ID)
return list(map(torch.tensor, encoded))
def tokenize(self, texts, context_length = 256, truncate_text = False):
if isinstance(texts, str):
texts = [texts]
all_tokens = self.encode(texts)
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
if truncate_text:
tokens = tokens[:context_length]
else:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
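# Usage sketch (added for illustration, not part of the original file): round-trip a
# caption through the default SimpleTokenizer instance defined above. `tokenize` pads
# to the context length with zeros; `decode` strips pad/start/end ids again.
if __name__ == '__main__':
    ids = tokenizer.tokenize('a photograph of a cat on a red couch', context_length = 256, truncate_text = True)
    print(ids.shape)                 # torch.Size([1, 256])
    print(tokenizer.decode(ids[0]))  # the caption reconstructed from BPE ids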
| DALLE-pytorch-main | dalle_pytorch/tokenizer.py |
from pathlib import Path
from random import randint, choice
import PIL
from torch.utils.data import Dataset
from torchvision import transforms as T
class TextImageDataset(Dataset):
def __init__(self,
folder,
text_len=256,
image_size=128,
truncate_captions=False,
resize_ratio=0.75,
transparent=False,
tokenizer=None,
shuffle=False
):
"""
@param folder: Folder containing images and text files matched by their paths' respective "stem"
@param truncate_captions: Rather than raising an exception, captions that are too long will be truncated.
"""
super().__init__()
self.shuffle = shuffle
path = Path(folder)
text_files = [*path.glob('**/*.txt')]
image_files = [
*path.glob('**/*.png'), *path.glob('**/*.jpg'),
*path.glob('**/*.jpeg'), *path.glob('**/*.bmp')
]
text_files = {text_file.stem: text_file for text_file in text_files}
image_files = {image_file.stem: image_file for image_file in image_files}
keys = (image_files.keys() & text_files.keys())
self.keys = list(keys)
self.text_files = {k: v for k, v in text_files.items() if k in keys}
self.image_files = {k: v for k, v in image_files.items() if k in keys}
self.text_len = text_len
self.truncate_captions = truncate_captions
self.resize_ratio = resize_ratio
self.tokenizer = tokenizer
image_mode = 'RGBA' if transparent else 'RGB'
self.image_transform = T.Compose([
T.Lambda(lambda img: img.convert(image_mode)
if img.mode != image_mode else img),
T.RandomResizedCrop(image_size,
scale=(self.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor()
])
def __len__(self):
return len(self.keys)
def random_sample(self):
return self.__getitem__(randint(0, self.__len__() - 1))
def sequential_sample(self, ind):
if ind >= self.__len__() - 1:
return self.__getitem__(0)
return self.__getitem__(ind + 1)
def skip_sample(self, ind):
if self.shuffle:
return self.random_sample()
return self.sequential_sample(ind=ind)
def __getitem__(self, ind):
key = self.keys[ind]
text_file = self.text_files[key]
image_file = self.image_files[key]
descriptions = text_file.read_text().split('\n')
descriptions = list(filter(lambda t: len(t) > 0, descriptions))
try:
description = choice(descriptions)
except IndexError as zero_captions_in_file_ex:
print(f"An exception occurred trying to load file {text_file}.")
print(f"Skipping index {ind}")
return self.skip_sample(ind)
tokenized_text = self.tokenizer.tokenize(
description,
self.text_len,
truncate_text=self.truncate_captions
).squeeze(0)
try:
image_tensor = self.image_transform(PIL.Image.open(image_file))
except (PIL.UnidentifiedImageError, OSError) as corrupt_image_exceptions:
print(f"An exception occurred trying to load file {image_file}.")
print(f"Skipping index {ind}")
return self.skip_sample(ind)
# Success
return tokenized_text, image_tensor
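# Usage sketch (folder path is a placeholder; not part of the original file): pair up
# caption .txt files and images by filename stem, tokenize with the default tokenizer,
# and batch with a DataLoader.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    from dalle_pytorch.tokenizer import tokenizer  # module-level SimpleTokenizer instance
    dataset = TextImageDataset(
        './path/to/captioned-images',  # hypothetical folder of matching image/.txt pairs
        text_len = 256,
        image_size = 128,
        truncate_captions = True,
        tokenizer = tokenizer,
        shuffle = True
    )
    loader = DataLoader(dataset, batch_size = 4, shuffle = True)
    text, images = next(iter(loader))  # (4, 256) token ids, (4, 3, 128, 128) image tensors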
| DALLE-pytorch-main | dalle_pytorch/loader.py |
from collections import deque
from collections.abc import Iterable
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
from rotary_embedding_torch import RotaryEmbedding, broadcat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def cast_tuple(val, depth = 1):
return val if isinstance(val, Iterable) else (val,) * depth
# classes
class DivideMax(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim = self.dim, keepdim = True).detach()
return x / maxes
class NonCached(nn.Module):
"""
A wrapper for layers that don't support the inference cache themselves.
Reconstructs the full sequence before the layer and
cuts the suffix of the outputs after the layer.
"""
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *, cache = None, cache_key = None, **kwargs):
n = x.shape[-2]
if exists(cache):
if cache_key in cache:
x = torch.cat([cache[cache_key], x], dim=-2)
cache[cache_key] = x
out = self.fn(x, **kwargs)
return out[:, -n:]
class CachedAs(nn.Module):
"""
A wrapper that defines a key for the inference cache.
"""
def __init__(self, cache_key, fn):
super().__init__()
self.cache_key = cache_key
self.fn = fn
def forward(self, x, *, cache=None, **kwargs):
return self.fn(x, cache=cache, cache_key=self.cache_key, **kwargs)
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
def __init__(self, dim, depth, fn):
super().__init__()
if depth <= 18:
init_eps = 0.1
elif depth > 18 and depth <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = torch.zeros(1, 1, dim).fill_(init_eps)
self.scale = nn.Parameter(scale)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
def __init__(self, dim, fn, sandwich = False):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
x = self.fn(x, **kwargs)
return self.norm_out(x)
# feed forward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, dropout = 0., mult = 4.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x, cache=None, cache_key=None):
return self.net(x)
# token shift classes
class PreShiftToken(nn.Module):
def __init__(self, fn, image_size, seq_len):
super().__init__()
self.fn = fn
self.image_size = image_size
self.seq_len = seq_len
self.img_seq_len = image_size ** 2
self.text_len = seq_len - self.img_seq_len + 1
def forward(self, x, cache=None, cache_key=None, **kwargs):
seq_len, image_size, text_len = self.seq_len, self.image_size, self.text_len
if exists(cache) and cache_key in cache:
offset = cache['offset']
assert offset >= text_len, "cached inference for text is not supported"
q = cache[cache_key]
assert isinstance(q, deque) and len(q) == image_size
x_top, x_left, *x_pass = x[:, -1].chunk(4, dim=-1)
q.append((x_top, x_left))
x_top = q.popleft()[0]
x_left = q[-2][1]
if (offset - text_len) % image_size == 0:
x_left = torch.zeros_like(x_left)
x = torch.cat((x_top, x_left, *x_pass), dim=-1)
return self.fn(x[:, None], cache=cache, **kwargs)
n = x.shape[1]
padding = seq_len - n + 1
# if sequence is shorter than the text length, no image tokens to shift
if n < text_len:
return self.fn(x, **kwargs)
# get text and image tokens
x_text, x_img = x[:, :text_len], x[:, text_len:]
x_img = F.pad(x_img, (0, 0, 0, padding))
x_img = rearrange(x_img, 'b (h w) d -> b h w d', h = image_size)
# shift 1 from the left for text tokens
x_text_shift, x_text_pass = x_text.chunk(2, dim = -1)
x_text_shift = F.pad(x_text_shift, (0, 0, 1, -1))
x_text = torch.cat((x_text_shift, x_text_pass), dim = -1)
# shift from top, left for image tokens
x_img_shift_top, x_img_shift_left, *x_img_pass = x_img.chunk(4, dim = -1)
x_img_shift_left = F.pad(x_img_shift_left, (0, 0, 1, -1))
x_img_shift_top = F.pad(x_img_shift_top, (0, 0, 0, 0, 1, -1))
x_img = torch.cat((x_img_shift_top, x_img_shift_left, *x_img_pass), dim = -1)
# merge text and image sequence back together
x_img = rearrange(x_img, 'b h w d -> b (h w) d')
x_img = x_img[:, :-padding]
x = torch.cat((x_text, x_img), dim = 1)
if exists(cache):
dummy_top, dummy_left, *_ = x[:, -1].chunk(4, dim=-1)
dummy_top, dummy_left = torch.zeros_like(dummy_top), torch.zeros_like(dummy_left)
q = deque()
x_img = x_img[:, -image_size:]
for _ in range(image_size - x_img.shape[1]):
q.append((dummy_top, dummy_left))
for i in range(x_img.shape[1]):
q.append(x_img[:, i].chunk(4, dim=-1)[:2])
cache[cache_key] = q
return self.fn(x, cache=cache, **kwargs)
# main transformer class
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
depth,
seq_len,
reversible = False,
causal = True,
heads = 8,
dim_head = 64,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
attn_types = None,
image_fmap_size = None,
sparse_attn = False,
stable = False,
sandwich_norm = False,
shift_tokens = False,
rotary_emb = True,
shared_attn_ids = None,
shared_ff_ids = None,
optimize_for_inference = False, # use cache-friendly masked attention instead of sparse one
):
super().__init__()
layers = nn.ModuleList([])
sparse_layer = cast_tuple(sparse_attn, depth)
self.seq_len = seq_len
self.image_fmap_size = image_fmap_size
attn_types = default(attn_types, ('full',))
attn_types = cast_tuple(attn_types)
attn_type_layer = islice(cycle(attn_types), depth)
shared_attn_ids = cycle(default(shared_attn_ids, range(depth)))
shared_ff_ids = cycle(default(shared_ff_ids, range(depth)))
shared_attn_layers = {}
shared_ff_layers = {}
for (ind, sparse_attn, attn_type, attn_id, ff_id) in \
zip(range(depth), sparse_layer, attn_type_layer, shared_attn_ids, shared_ff_ids):
if attn_type == 'full':
attn_class = partial(Attention, stable = stable)
elif attn_type == 'sparse':
attn_class = SparseAttention
elif attn_type == 'axial_row':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size, stable = stable)
elif attn_type == 'axial_col':
if optimize_for_inference:
attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
else:
attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size, stable = stable)
elif attn_type == 'conv_like':
attn_class = partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size, stable = stable)
else:
raise ValueError(f'attention type "{attn_type}" is not valid')
attn, reused_attn_type = shared_attn_layers.get(attn_id, (None, None))
if not exists(attn):
attn = attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
shared_attn_layers[attn_id] = (attn, attn_type)
elif attn_type != reused_attn_type:
raise ValueError('attn_types do not match shared_attn_ids '
f'(ind = {ind}, attn_type = "{attn_type}", reused_attn_type = "{reused_attn_type}")')
ff = shared_ff_layers.get(ff_id)
if not exists(ff):
ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
shared_ff_layers[ff_id] = ff
if isinstance(attn, Attention):
attn = CachedAs(f'attn_{ind}', attn)
else:
# at the moment, other attention classes don't support cache
attn = NonCached(attn)
if shift_tokens:
attn = CachedAs(f'preshift_attn_{ind}', PreShiftToken(attn, image_size = image_fmap_size, seq_len = seq_len))
ff = CachedAs(f'preshift_ff_{ind}', PreShiftToken(ff, image_size = image_fmap_size, seq_len = seq_len))
layers.append(nn.ModuleList([
LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth
route_all = ((True, True),) * depth
attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn,
'cache': route_all}
self.layers = execute_type(layers, args_route = attn_route_map)
# generate positional embeddings for rotary
pos_emb = None
if rotary_emb:
rot_dim = dim_head // 3
img_seq_len = (image_fmap_size ** 2)
text_len = seq_len - img_seq_len + 1
text_pos_emb = RotaryEmbedding(dim = rot_dim)
img_axial_pos_emb = RotaryEmbedding(dim = rot_dim, freqs_for = 'pixel')
text_freqs = text_pos_emb(torch.arange(text_len))
img_to_text_freqs = text_pos_emb(torch.full((img_seq_len,), 8192)) # image is given a position far away from text
text_freqs = torch.cat((text_freqs, img_to_text_freqs), dim = 0)
img_freqs_axial = img_axial_pos_emb(torch.linspace(-1, 1, steps = image_fmap_size))
img_freqs = broadcat((rearrange(img_freqs_axial, 'i d -> i () d'), rearrange(img_freqs_axial, 'j d -> () j d')), dim = -1)
img_freqs = rearrange(img_freqs, 'h w d -> (h w) d')
text_axial_freqs = img_axial_pos_emb(torch.full((text_len,), -10.)) # text is given a position of -10 apart from the image axial positions, which is from range [-1, 1]
text_axial_freqs = torch.cat((text_axial_freqs, text_axial_freqs), dim = -1)
img_freqs = torch.cat((text_axial_freqs, img_freqs), dim = 0)
pos_emb = torch.cat((text_freqs, img_freqs), dim = -1)
pos_emb = rearrange(pos_emb, 'n d -> () n d')
self.register_buffer('pos_emb', pos_emb)
def forward(self, x, **kwargs):
return self.layers(x, rotary_pos_emb = self.pos_emb, **kwargs)
def _get_attention_mask(self, attn_type):
img_seq_len = self.image_fmap_size ** 2
text_len = self.seq_len + 1 - img_seq_len
static_mask = torch.zeros(self.seq_len, self.seq_len, dtype=torch.bool)
static_mask[:, :text_len] = True
if attn_type == 'axial_row':
for row in range(self.image_fmap_size):
begin = text_len + row * self.image_fmap_size
end = text_len + (row + 1) * self.image_fmap_size
static_mask[begin:end, begin:end] = True
elif attn_type == 'axial_col':
for col in range(self.image_fmap_size):
begin = text_len + col
static_mask[begin::self.image_fmap_size, begin::self.image_fmap_size] = True
else:
raise ValueError(f'attention type "{attn_type}" can\'t be simulated with a static mask')
return static_mask
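# Illustrative sketch (not part of the original file): the static masks produced by
# `_get_attention_mask` can be reproduced standalone. With a 2x2 image feature map and
# seq_len = 6 (so text_len = 3), an 'axial_row' mask lets every position attend to all
# text positions plus the image positions in its own row.
if __name__ == '__main__':
    image_fmap_size, seq_len = 2, 6
    img_seq_len = image_fmap_size ** 2
    text_len = seq_len + 1 - img_seq_len
    static_mask = torch.zeros(seq_len, seq_len, dtype = torch.bool)
    static_mask[:, :text_len] = True
    for row in range(image_fmap_size):
        begin = text_len + row * image_fmap_size
        end = text_len + (row + 1) * image_fmap_size
        static_mask[begin:end, begin:end] = True
    print(static_mask.int())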
| DALLE-pytorch-main | dalle_pytorch/transformer.py |
"""
Utility functions for optional distributed execution.
To use,
1. set the `BACKENDS` to the ones you want to make available,
2. in the script, wrap the argument parser with `wrap_arg_parser`,
3. in the script, set and use the backend by calling
`set_backend_from_args`.
You can check whether a backend is in use with the `using_backend`
function.
"""
from dalle_pytorch.distributed_backends import \
DeepSpeedBackend, \
DummyBackend, \
HorovodBackend
_DEFAULT_BACKEND = DummyBackend()
"""Which backend to use by default. Assumed to be _not_ distributed."""
BACKENDS = [
_DEFAULT_BACKEND,
DeepSpeedBackend(),
HorovodBackend(),
]
is_distributed = None
"""Whether we are distributed."""
backend = None
"""Backend in usage."""
def wrap_arg_parser(parser):
"""Add arguments to support optional distributed backend usage."""
parser.add_argument(
'--distributed_backend',
'--distr_backend',
type=str,
default=None,
help='which distributed backend to use. Do not distribute by default',
)
for distr_backend in BACKENDS:
parser = distr_backend.wrap_arg_parser(parser)
return parser
def set_backend_from_args(args):
"""Set and return the backend based on the given `args`."""
global is_distributed, backend
# Handle this specially for backwards compatibility.
if args.deepspeed:
args.distributed_backend = DeepSpeedBackend.BACKEND_NAME
if not args.distributed_backend:
is_distributed = False
backend = _DEFAULT_BACKEND
return backend
backend_name = args.distributed_backend.lower()
for distr_backend in BACKENDS:
if distr_backend.BACKEND_NAME.lower() == backend_name:
backend = distr_backend
if not backend.has_backend():
raise ModuleNotFoundError(
f'{backend.BACKEND_NAME} backend selected but '
'module not available'
)
print(f'Using {backend.BACKEND_NAME} for distributed execution')
is_distributed = True
return backend
raise ValueError(
'unknown backend; please check `distributed_utils.BACKENDS`')
def require_set_backend():
"""Raise an `AssertionError` when the backend has not been set."""
assert backend is not None, (
'distributed backend is not set. Please call '
'`distributed_utils.set_backend_from_args` at the start of your script'
)
def using_backend(test_backend):
"""Return whether the backend is set to `test_backend`.
`test_backend` may be a string of the name of the backend or
its class.
"""
require_set_backend()
if isinstance(test_backend, str):
return backend.BACKEND_NAME == test_backend
return isinstance(backend, test_backend)
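# Usage sketch (not part of the original file) following the three steps in the module
# docstring: wrap the argument parser, parse, then set the backend from the parsed args.
# With no CLI flags this falls back to the non-distributed DummyBackend.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description = 'optionally distributed training script')
    parser = wrap_arg_parser(parser)
    args = parser.parse_args()
    distr_backend = set_backend_from_args(args)
    if is_distributed:
        distr_backend.initialize()
        print(f'world size: {distr_backend.get_world_size()}')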
| DALLE-pytorch-main | dalle_pytorch/distributed_utils.py |
import io
import sys
import os
import requests
import PIL
import warnings
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt, log
from packaging import version
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel, GumbelVQ
import importlib
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch import distributed_utils
# constants
CACHE_PATH = os.path.expanduser("~/.cache/dalle")
OPENAI_VAE_ENCODER_PATH = 'https://cdn.openai.com/dall-e/encoder.pkl'
OPENAI_VAE_DECODER_PATH = 'https://cdn.openai.com/dall-e/decoder.pkl'
VQGAN_VAE_PATH = 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1'
VQGAN_VAE_CONFIG_PATH = 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1'
# helpers methods
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def load_model(path):
with open(path, 'rb') as f:
return torch.load(f, map_location = torch.device('cpu'))
def map_pixels(x, eps = 0.1):
return (1 - 2 * eps) * x + eps
def unmap_pixels(x, eps = 0.1):
return torch.clamp((x - eps) / (1 - 2 * eps), 0, 1)
def download(url, filename = None, root = CACHE_PATH):
if (
not distributed_utils.is_distributed
or distributed_utils.backend.is_local_root_worker()
):
os.makedirs(root, exist_ok = True)
filename = default(filename, os.path.basename(url))
download_target = os.path.join(root, filename)
download_target_tmp = os.path.join(root, f'tmp.{filename}')
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if (
distributed_utils.is_distributed
and not distributed_utils.backend.is_local_root_worker()
and not os.path.isfile(download_target)
):
# If the file doesn't exist yet, wait until it's downloaded by the root worker.
distributed_utils.backend.local_barrier()
if os.path.isfile(download_target):
return download_target
with urllib.request.urlopen(url) as source, open(download_target_tmp, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
os.rename(download_target_tmp, download_target)
if (
distributed_utils.is_distributed
and distributed_utils.backend.is_local_root_worker()
):
distributed_utils.backend.local_barrier()
return download_target
def make_contiguous(module):
with torch.no_grad():
for param in module.parameters():
param.set_(param.contiguous())
# package versions
def get_pkg_version(pkg_name):
from pkg_resources import get_distribution
return get_distribution(pkg_name).version
# pretrained Discrete VAE from OpenAI
class OpenAIDiscreteVAE(nn.Module):
def __init__(self):
super().__init__()
assert version.parse(get_pkg_version('torch')) < version.parse('1.11.0'), 'torch version must be <= 1.10 in order to use OpenAI discrete vae'
self.enc = load_model(download(OPENAI_VAE_ENCODER_PATH))
self.dec = load_model(download(OPENAI_VAE_DECODER_PATH))
make_contiguous(self)
self.channels = 3
self.num_layers = 3
self.image_size = 256
self.num_tokens = 8192
@torch.no_grad()
def get_codebook_indices(self, img):
img = map_pixels(img)
z_logits = self.enc.blocks(img)
z = torch.argmax(z_logits, dim = 1)
return rearrange(z, 'b h w -> b (h w)')
def decode(self, img_seq):
b, n = img_seq.shape
img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))
z = F.one_hot(img_seq, num_classes = self.num_tokens)
z = rearrange(z, 'b h w c -> b c h w').float()
x_stats = self.dec(z).float()
x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
return x_rec
def forward(self, img):
raise NotImplementedError
# VQGAN from Taming Transformers paper
# https://arxiv.org/abs/2012.09841
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
if not "target" in config:
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
class VQGanVAE(nn.Module):
def __init__(self, vqgan_model_path=None, vqgan_config_path=None):
super().__init__()
if vqgan_model_path is None:
model_filename = 'vqgan.1024.model.ckpt'
config_filename = 'vqgan.1024.config.yml'
download(VQGAN_VAE_CONFIG_PATH, config_filename)
download(VQGAN_VAE_PATH, model_filename)
config_path = str(Path(CACHE_PATH) / config_filename)
model_path = str(Path(CACHE_PATH) / model_filename)
else:
model_path = vqgan_model_path
config_path = vqgan_config_path
config = OmegaConf.load(config_path)
model = instantiate_from_config(config["model"])
state = torch.load(model_path, map_location = 'cpu')['state_dict']
model.load_state_dict(state, strict = False)
print(f"Loaded VQGAN from {model_path} and {config_path}")
self.model = model
# f as used in https://github.com/CompVis/taming-transformers#overview-of-pretrained-models
f = config.model.params.ddconfig.resolution / config.model.params.ddconfig.attn_resolutions[0]
self.num_layers = int(log(f)/log(2))
self.channels = 3
self.image_size = 256
self.num_tokens = config.model.params.n_embed
self.is_gumbel = isinstance(self.model, GumbelVQ)
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(
self, self.model.quantize.embed.weight if self.is_gumbel else self.model.quantize.embedding.weight)
@torch.no_grad()
def get_codebook_indices(self, img):
b = img.shape[0]
img = (2 * img) - 1
_, _, [_, _, indices] = self.model.encode(img)
if self.is_gumbel:
return rearrange(indices, 'b h w -> b (h w)', b=b)
return rearrange(indices, '(b n) -> b n', b = b)
def decode(self, img_seq):
b, n = img_seq.shape
one_hot_indices = F.one_hot(img_seq, num_classes = self.num_tokens).float()
z = one_hot_indices @ self.model.quantize.embed.weight if self.is_gumbel \
else (one_hot_indices @ self.model.quantize.embedding.weight)
z = rearrange(z, 'b (h w) c -> b c h w', h = int(sqrt(n)))
img = self.model.decode(z)
img = (img.clamp(-1., 1.) + 1) * 0.5
return img
def forward(self, img):
raise NotImplementedError
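# Usage sketch (not part of the original file; assumes the default VQGAN checkpoint and
# config can be downloaded, plus the taming-transformers / omegaconf dependencies):
# encode a batch of images to codebook indices and reconstruct them.
if __name__ == '__main__':
    vae = VQGanVAE()
    images = torch.rand(2, vae.channels, vae.image_size, vae.image_size)  # values in [0, 1]
    indices = vae.get_codebook_indices(images)  # (2, number of image tokens)
    recon = vae.decode(indices)                 # (2, 3, 256, 256), values in [0, 1]
    print(indices.shape, recon.shape)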
| DALLE-pytorch-main | dalle_pytorch/vae.py |
"""
An abstract backend for distributed deep learning.
Provides several standard utility methods under a common API.
Please check the documentation of the class `DistributedBackend` for
details to implement a new backend.
"""
from importlib import import_module
class DistributedBackend:
"""An abstract backend class for distributed deep learning.
Provides several standard utility methods under a common API.
Variables that must be overridden:
- BACKEND_MODULE_NAME
- BACKEND_NAME
Methods that must be overridden:
- wrap_arg_parser
- _initialize
- _get_world_size
- _get_rank
- _get_local_rank
- _local_barrier
- _distribute
- _average_all
"""
BACKEND_MODULE_NAME = None
"""Name of the module to import for the backend."""
BACKEND_NAME = None
"""Name of the backend for printing."""
ROOT_RANK = 0
backend_module = None
"""The module to access the backend."""
is_initialized = False
"""Whether the backend is initialized."""
def __init__(self):
if self.BACKEND_MODULE_NAME is None:
raise NotImplementedError('BACKEND_MODULE_NAME is not set')
if self.BACKEND_NAME is None:
raise NotImplementedError('BACKEND_NAME is not set')
def has_backend(self):
"""Return whether the backend module is now imported."""
try:
self.backend_module = import_module(self.BACKEND_MODULE_NAME)
except ModuleNotFoundError:
return False
return True
def check_batch_size(self, batch_size):
"""Check whether the batch size makes sense for distribution."""
assert batch_size >= self.get_world_size(), \
(f"batch size can't be smaller than number of processes "
f'({batch_size} < {self.get_world_size()})')
def wrap_arg_parser(self, parser):
"""Add arguments to support optional distributed backend usage."""
raise NotImplementedError
def initialize(self):
"""Initialize the distributed backend."""
self._initialize()
self.is_initialized = True
def _initialize(self):
"""Initialize the distributed backend."""
raise NotImplementedError
def require_init(self):
"""Raise an error when the backend has not been initialized yet."""
assert self.is_initialized, \
(f'{self.BACKEND_NAME} backend has not been initialized; please call '
f'`distributed_utils.initialize` at the start of your script to '
f'allow optional distributed usage')
def get_world_size(self):
"""Return the amount of distributed processes."""
self.require_init()
return self._get_world_size()
def _get_world_size(self):
"""Return the amount of distributed processes."""
raise NotImplementedError
def get_rank(self):
"""Return the global rank of the calling worker process."""
self.require_init()
return self._get_rank()
def _get_rank(self):
"""Return the global rank of the calling worker process."""
raise NotImplementedError
def get_local_rank(self):
"""Return the local rank of the calling worker process.
The local rank is the rank based on a single node's processes.
"""
self.require_init()
return self._get_local_rank()
def _get_local_rank(self):
"""Return the local rank of the calling worker process.
The local rank is the rank based on a single node's processes.
"""
raise NotImplementedError
def is_root_worker(self):
"""Return whether the calling worker has the root rank."""
return self.get_rank() == self.ROOT_RANK
def is_local_root_worker(self):
"""Return whether the calling worker has the root rank on this node."""
return self.get_local_rank() == self.ROOT_RANK
def local_barrier(self):
"""Wait until all processes on this node have called this function."""
self.require_init()
self._local_barrier()
def _local_barrier(self):
"""Wait until all processes on this node have called this function."""
raise NotImplementedError
def distribute(
self,
args=None,
model=None,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
**kwargs,
):
"""Return a distributed model engine, optimizer, dataloader, and
learning rate scheduler. These are obtained by wrapping the
given values with the backend.
"""
self.require_init()
return self._distribute(
args,
model,
optimizer,
model_parameters,
training_data,
lr_scheduler,
**kwargs,
)
def _distribute(
self,
args=None,
model=None,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
**kwargs,
):
"""Return a distributed model engine, optimizer, dataloader, and
learning rate scheduler. These are obtained by wrapping the
given values with the backend.
"""
raise NotImplementedError
def average_all(self, tensor):
"""Return the average of `tensor` over all workers."""
self.require_init()
return self._average_all(tensor)
def _average_all(self, tensor):
"""Return the average of `tensor` over all workers."""
raise NotImplementedError
| DALLE-pytorch-main | dalle_pytorch/distributed_backends/distributed_backend.py |
from .deepspeed_backend import DeepSpeedBackend
from .distributed_backend import DistributedBackend
from .dummy_backend import DummyBackend
from .horovod_backend import HorovodBackend
| DALLE-pytorch-main | dalle_pytorch/distributed_backends/__init__.py |
import torch
from .distributed_backend import DistributedBackend
class HorovodBackend(DistributedBackend):
"""Distributed backend using Horovod."""
BACKEND_MODULE_NAME = 'horovod.torch'
BACKEND_NAME = 'Horovod'
def wrap_arg_parser(self, parser):
return parser
def check_batch_size(self, batch_size):
# Horovod uses the local batch size to determine the effective
# batch size.
pass
def _initialize(self):
self.backend_module.init()
if torch.cuda.is_available():
torch.cuda.set_device(self._get_local_rank())
def _get_world_size(self):
return self.backend_module.size()
def _get_rank(self):
return self.backend_module.rank()
def _get_local_rank(self):
return self.backend_module.local_rank()
def _local_barrier(self):
# Actually a global barrier but works for our purposes.
self.backend_module.join()
def _distribute(
self,
_args=None,
model=None,
optimizer=None,
_model_parameters=None,
training_data=None,
lr_scheduler=None,
**_kwargs,
):
optimizer = self.backend_module.DistributedOptimizer(optimizer)
self.backend_module.broadcast_parameters(
model.state_dict(), root_rank=self.ROOT_RANK)
self.backend_module.broadcast_optimizer_state(
optimizer, root_rank=self.ROOT_RANK)
return (model, optimizer, training_data, lr_scheduler)
def _average_all(self, tensor):
# Reduce op is average by default
averaged = self.backend_module.allreduce(tensor)
return averaged
| DALLE-pytorch-main | dalle_pytorch/distributed_backends/horovod_backend.py |
import json
import os
import torch
from .distributed_backend import DistributedBackend
class DeepSpeedBackend(DistributedBackend):
"""Distributed backend using the DeepSpeed engine."""
BACKEND_MODULE_NAME = 'deepspeed'
BACKEND_NAME = 'DeepSpeed'
def wrap_arg_parser(self, parser):
if not self.has_backend():
parser.add_argument(
'--deepspeed',
type=lambda _: False,
help=(
'whether to use DeepSpeed '
"(ignored since it's not available)"
),
)
else:
parser = self.backend_module.add_config_arguments(parser)
parser.add_argument(
'--local_rank',
type=int,
default=-1,
help='local rank passed from distributed launcher',
)
return parser
def _initialize(self):
self.backend_module.init_distributed()
if torch.cuda.is_available():
torch.cuda.set_device(self._get_local_rank())
@staticmethod
def _require_torch_distributed_init():
"""Raise an error when `torch.distributed` has not been
initialized yet.
"""
assert torch.distributed.is_initialized(), \
('`torch.distributed` is not initialized; please call '
'`DeepSpeedBackend.initialize` at the start of your script')
def _get_world_size(self):
self._require_torch_distributed_init()
return torch.distributed.get_world_size()
def _get_rank(self):
self._require_torch_distributed_init()
return torch.distributed.get_rank()
def _get_local_rank(self):
self._require_torch_distributed_init()
return int(os.environ['LOCAL_RANK'])
def _local_barrier(self):
self._require_torch_distributed_init()
torch.distributed.barrier()
def _check_args(self, args, optimizer, lr_scheduler, kwargs):
"""Return an appropriate optimizer and learning rate scheduler
after checking the values passed to `distribute`.
"""
self._check_argvs(args, optimizer, lr_scheduler, kwargs)
(optimizer, lr_scheduler) = self._check_config(
args, optimizer, lr_scheduler, kwargs)
return (optimizer, lr_scheduler)
def _check_argvs(self, args, optimizer, lr_scheduler, kwargs):
"""Apply several sanity checks to the given command
line arguments.
"""
has_json_config = (hasattr(args, 'deepspeed_config')
and args.deepspeed_config is not None)
has_dict_config = 'config_params' in kwargs
if (
# No config given
(not has_json_config and not has_dict_config)
# JSON config file does not exist
or (not has_dict_config
and not os.path.isfile(args.deepspeed_config))
):
# Let DeepSpeed handle these argument errors.
return
if not args.deepspeed:
print(
'WARNING: DeepSpeed backend was selected; setting '
'`args.deepspeed = True`'
)
args.deepspeed = True
if has_json_config and has_dict_config:
print(
'WARNING: DeepSpeed config was given as both JSON file and '
'Python dictionary. Python dictionary takes precedence.'
)
def _check_config(self, args, optimizer, lr_scheduler, kwargs):
"""Return an appropriate optimizer and learning rate scheduler
for the DeepSpeed configuration.
"""
if 'config_params' in kwargs:
config = kwargs['config_params']
else:
with open(args.deepspeed_config, 'r') as json_config_file:
config = json.load(json_config_file)
if 'optimizer' in config and optimizer is not None:
print(
'WARNING: Optimizer encountered in both DeepSpeed config and '
'keyword arguments. Optimizer in DeepSpeed config '
'takes precedence.'
)
optimizer = None
if 'scheduler' in config and lr_scheduler is not None:
print(
'WARNING: Learning rate scheduler encountered in both '
'DeepSpeed config and keyword arguments. Learning rate '
'scheduler in DeepSpeed config takes precedence.'
)
# For the LR scheduler, the JSON config already has
# precedence. We do this for forward compatibility.
lr_scheduler = None
return (optimizer, lr_scheduler)
def _distribute(
self,
args=None,
model=None,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
**kwargs,
):
"""Return a distributed model engine, optimizer, dataloader, and
learning rate scheduler. These are obtained by wrapping the
given values with the backend.
For these and any other possible arguments,
see `deepspeed.initialize`.
"""
(optimizer, lr_scheduler) = self._check_args(
args, optimizer, lr_scheduler, kwargs)
return self.backend_module.initialize(
args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
**kwargs,
)
def _average_all(self, tensor):
self._require_torch_distributed_init()
# We copy because modification happens in-place
averaged = tensor.detach().clone()
# We use `all_reduce` because it is better supported than `reduce`
torch.distributed.all_reduce(averaged, torch.distributed.ReduceOp.SUM)
return averaged / self.get_world_size()
| DALLE-pytorch-main | dalle_pytorch/distributed_backends/deepspeed_backend.py |
from .distributed_backend import DistributedBackend
class DummyBackend(DistributedBackend):
"""Acts like a distributed backend.
Used as a stand-in replacement to obtain a non-distributed program.
"""
# We define this so we can use `super().__init__` but want this to
# throw an error upon import.
BACKEND_MODULE_NAME = 'NO MODULE'
BACKEND_NAME = 'Dummy'
def has_backend(self):
return True
def wrap_arg_parser(self, parser):
return parser
def _initialize(self):
pass
def _get_world_size(self):
return 1
def _get_rank(self):
return self.ROOT_RANK
def _get_local_rank(self):
return self.ROOT_RANK
def _local_barrier(self):
pass
def _distribute(
self,
_args=None,
model=None,
optimizer=None,
_model_parameters=None,
training_data=None,
lr_scheduler=None,
**_kwargs,
):
"""Return the model, optimizer, dataloader, and learning rate scheduler
as is.
"""
return (model, optimizer, training_data, lr_scheduler)
def _average_all(self, tensor):
return tensor
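# Sanity sketch (not part of the original file): the dummy backend hands everything back
# unchanged, so non-distributed runs can share the same code path as distributed ones.
if __name__ == '__main__':
    backend = DummyBackend()
    backend.initialize()
    distributed = backend.distribute(model = 'model', optimizer = 'opt', training_data = 'data', lr_scheduler = 'sched')
    assert distributed == ('model', 'opt', 'data', 'sched')
    assert backend.get_world_size() == 1 and backend.is_root_worker()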
| DALLE-pytorch-main | dalle_pytorch/distributed_backends/dummy_backend.py |
from setuptools import setup, find_packages
setup(
name = 'performer-pytorch',
packages = find_packages(exclude=['examples']),
version = '1.1.4',
license='MIT',
description = 'Performer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/performer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'efficient attention',
'transformers'
],
install_requires=[
'einops>=0.3',
'local-attention>=1.1.1',
'torch>=1.6',
'axial-positional-embedding>=0.1.0'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| performer-pytorch-main | setup.py |
import deepspeed
from performer_pytorch import PerformerLM
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
def add_argument():
parser=argparse.ArgumentParser(description='enwik8')
parser.add_argument('--with_cuda', default=False, action='store_true',
help='use CPU in case there\'s no GPU support')
parser.add_argument('--use_ema', default=False, action='store_true',
help='whether use exponential moving average')
parser.add_argument('-b', '--batch_size', default=32, type=int,
help='mini-batch size (default: 32)')
parser.add_argument('-e', '--epochs', default=30, type=int,
help='number of total epochs (default: 30)')
parser.add_argument('--local_rank', type=int, default=-1,
help='local rank passed from distributed launcher')
parser = deepspeed.add_config_arguments(parser)
args=parser.parse_args()
return args
# constants
EPOCHS = 20
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = PerformerLM(
num_tokens = 256,
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True,
reversible = True,
nb_features = 256,
use_scalenorm = True,
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.array(np.frombuffer(file.read(int(95e6)), dtype=np.uint8))
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# setup deepspeed
cmd_args = add_argument()
model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset)
# training
for _ in range(EPOCHS):
for i, data in enumerate(trainloader):
model_engine.train()
data = data.to(model_engine.local_rank)
loss = model_engine(data, return_loss = True)
model_engine.backward(loss)
model_engine.step()
print(f'training loss: {loss.item()}')
if model_engine.local_rank != 0:
continue
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
inp = random.choice(val_dataset)[:-1]
loss = model(inp[None, :].cuda(), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp.cuda(), GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| performer-pytorch-main | examples/enwik8_deepspeed/train.py |
import tqdm
import torch
import torch.optim as optim
from performer_pytorch import PerformerEncDec
from apex import amp
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 1e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cycle():
while True:
prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
tgt = torch.cat((prefix, src, src), 1)
src_mask = torch.ones(BATCH_SIZE, ENC_SEQ_LEN).bool().cuda()
tgt_mask = torch.ones(BATCH_SIZE, tgt.shape[1]).bool().cuda()
yield (src, tgt, src_mask, tgt_mask)
# instantiate model
model = PerformerEncDec(
dim=512,
enc_num_tokens=NUM_TOKENS,
enc_depth=1,
enc_heads=8,
enc_max_seq_len=ENC_SEQ_LEN,
enc_reversible=True,
enc_feature_redraw_interval=1000,
enc_nb_features = 64,
dec_num_tokens=NUM_TOKENS,
dec_depth=3,
dec_heads=8,
dec_max_seq_len=DEC_SEQ_LEN,
dec_reversible=True,
dec_feature_redraw_interval=1000,
dec_nb_features=64
).cuda()
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# amp
model, optim = amp.initialize(model, optim, opt_level = 'O1')
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
src, tgt, src_mask, tgt_mask = next(cycle())
loss = model(src, tgt, enc_mask=src_mask, dec_mask=tgt_mask)
with amp.scale_loss(loss, optim) as scaled_loss:
scaled_loss.backward()
print(f'{i}: {loss.item()}')
optim.step()
optim.zero_grad()
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
src, _, src_mask, _ = next(cycle())
src, src_mask = src[:1], src_mask[:1]
start_tokens = (torch.ones((1, 1)) * 1).long().cuda()
sample = model.generate(src, start_tokens, ENC_SEQ_LEN, enc_mask=src_mask)
incorrects = (src != sample).abs().sum()
print(f"input: ", src)
print(f"predicted output: ", sample)
print(f"incorrects: {incorrects}")
| performer-pytorch-main | examples/toy_tasks/enc_dec_copy_apex.py |
import tqdm
import torch
import torch.optim as optim
from performer_pytorch import PerformerEncDec
from torch.cuda.amp import autocast, GradScaler
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 32
LEARNING_RATE = 1e-4
GENERATE_EVERY = 100
NUM_TOKENS = 16 + 2
ENC_SEQ_LEN = 32
DEC_SEQ_LEN = 64 + 1
# helpers
def cycle():
while True:
prefix = torch.ones((BATCH_SIZE, 1)).long().cuda()
src = torch.randint(2, NUM_TOKENS, (BATCH_SIZE, ENC_SEQ_LEN)).long().cuda()
tgt = torch.cat((prefix, src, src), 1)
src_mask = torch.ones(BATCH_SIZE, ENC_SEQ_LEN).bool().cuda()
tgt_mask = torch.ones(BATCH_SIZE, tgt.shape[1]).bool().cuda()
yield (src, tgt, src_mask, tgt_mask)
# instantiate model
model = PerformerEncDec(
dim=512,
enc_num_tokens=NUM_TOKENS,
enc_depth=1,
enc_heads=8,
enc_max_seq_len=ENC_SEQ_LEN,
enc_reversible=True,
enc_feature_redraw_interval=1000,
enc_nb_features = 64,
dec_num_tokens=NUM_TOKENS,
dec_depth=3,
dec_heads=8,
dec_max_seq_len=DEC_SEQ_LEN,
dec_reversible=True,
dec_feature_redraw_interval=1000,
dec_nb_features=64
).cuda()
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
scaler = GradScaler()
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
src, tgt, src_mask, tgt_mask = next(cycle())
with autocast():
loss = model(src, tgt, enc_mask=src_mask, dec_mask=tgt_mask)
scaler.scale(loss).backward()
print(f'{i}: {loss.item()}')
scaler.step(optim)
scaler.update()
optim.zero_grad()
if i != 0 and i % GENERATE_EVERY == 0:
model.eval()
src, _, src_mask, _ = next(cycle())
src, src_mask = src[:1], src_mask[:1]
start_tokens = (torch.ones((1, 1)) * 1).long().cuda()
sample = model.generate(src, start_tokens, ENC_SEQ_LEN, enc_mask=src_mask)
incorrects = (src != sample).abs().sum()
print(f"input: ", src)
print(f"predicted output: ", sample)
print(f"incorrects: {incorrects}")
| performer-pytorch-main | examples/toy_tasks/enc_dec_copy.py |
from performer_pytorch import PerformerLM
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from torch.cuda.amp import autocast, GradScaler
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 2048
SEQ_LEN = 4096
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = PerformerLM(
num_tokens = 256,
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
heads = 8,
causal = True,
reversible = True,
nb_features = 256,
use_scalenorm = True,
shift_tokens = True,
local_attn_heads = (8, 8, 8, 6, 4, 2)
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
X = np.array(np.frombuffer(file.read(int(95e6)), dtype=np.uint8))
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
scaler = GradScaler()
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
with autocast():
loss = model(next(train_loader), return_loss = True)
scaler.scale(loss).backward()
print(f'training loss: {loss.item()}')
scaler.unscale_(optim)
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
scaler.step(optim)
scaler.update()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0 and i != 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| performer-pytorch-main | examples/enwik8_simple/train.py |
import re
import torch
from torch import nn
from performer_pytorch.performer_pytorch import PerformerLM
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return bool(re.match(f'^{prefix}', str))
def group_by_key_prefix(prefix, d):
return group_dict_by_key(lambda x: string_begins_with(prefix, x), d)
def group_by_key_prefix_and_remove_prefix(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(lambda x: string_begins_with(prefix, x), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
def extract_enc_dec_kwargs(kwargs):
enc_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(ENC_PREFIX, kwargs)
dec_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(DEC_PREFIX, kwargs)
return enc_kwargs, dec_kwargs, kwargs
def extract_and_set_enc_dec_kwargs(kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_enc_dec_kwargs(kwargs)
if 'mask' in enc_kwargs:
dec_kwargs.setdefault('context_mask', enc_kwargs['mask'])
return enc_kwargs, dec_kwargs, kwargs
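# Illustration (added note, not in the original source) of how the enc_/dec_ prefix
# routing above splits keyword arguments:
#   extract_enc_dec_kwargs({'enc_depth': 6, 'dec_depth': 8, 'tie_token_embeds': True})
#   -> ({'depth': 6}, {'depth': 8}, {'tie_token_embeds': True})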
class PerformerEncDec(nn.Module):
def __init__(
self,
dim,
ignore_index = 0,
pad_value = 0,
tie_token_embeds = False,
no_projection = False,
**kwargs
):
super().__init__()
enc_kwargs, dec_kwargs, _ = extract_enc_dec_kwargs(kwargs)
assert 'dim' not in dec_kwargs and 'dim' not in enc_kwargs, 'you must set the dim for both encoder and decoder'
enc_kwargs['dim'] = dec_kwargs['dim'] = dim
enc_kwargs['no_projection'] = dec_kwargs['no_projection'] = no_projection
dec_kwargs['causal'] = True
dec_kwargs['cross_attend'] = True
enc = PerformerLM(**enc_kwargs)
dec = PerformerLM(**dec_kwargs)
if tie_token_embeds:
enc.token_emb = dec.token_emb
self.enc = enc
self.dec = AutoregressiveWrapper(dec, ignore_index = ignore_index, pad_value = pad_value)
@torch.no_grad()
def generate(self, seq_in, seq_out_start, seq_len, **kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
encodings = self.enc(seq_in, return_encodings = True, **enc_kwargs)
return self.dec.generate(seq_out_start, seq_len, context = encodings, **{**dec_kwargs, **kwargs})
def forward(self, seq_in, seq_out, enc_mask = None, **kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
encodings = self.enc(seq_in, mask = enc_mask, return_encodings = True, **enc_kwargs)
return self.dec(seq_out, context = encodings, context_mask = enc_mask, **dec_kwargs) | performer-pytorch-main | performer_pytorch/performer_enc_dec.py |
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
def exists(val):
return val is not None
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
def repetition_penalty_fn(logits, ctx, theta=1.2):
w = torch.ones(logits.shape[-1], dtype=torch.float, device=logits.device)
for i in torch.unique(ctx):
w[i] = theta
return logits/w
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = 0, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, repetition_penalty=1.0, repetition_penalty_ctx=32, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
# in case of conditional generation, if enc_mask is not provided use the correct context_mask
context_mask = kwargs.pop('context_mask', None)
if 'context' in kwargs and not exists(context_mask):
context = kwargs['context']
context_mask = torch.full(context.shape[:2], True, dtype=torch.bool, device=out.device)
kwargs.update(context_mask = context_mask)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, mask=input_mask, **kwargs)[:, -1, :]
if repetition_penalty > 1.0:
logits = repetition_penalty_fn(logits, out[:, -repetition_penalty_ctx:], theta=repetition_penalty)
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
input_mask = F.pad(input_mask, (0, 1), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, **kwargs):
xi = x[:, :-1]
xo = x[:, 1:]
# help auto-solve an area of confusion around input masks in auto-regressive
# if user supplies a mask that is only off by one from the source sequence, resolve it for them
mask = kwargs.pop('mask', None)
if mask is not None and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
kwargs.update(mask = mask)
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
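# Standalone sketch (not part of the original file): `top_k` keeps the largest
# (1 - thres) fraction of logits and sets the rest to -inf before sampling.
if __name__ == '__main__':
    logits = torch.arange(10, dtype = torch.float)[None, :]  # (1, 10)
    filtered = top_k(logits, thres = 0.5)                    # keeps the 5 largest logits
    print(filtered)
    probs = F.softmax(filtered, dim = -1)
    print(torch.multinomial(probs, 1))                       # samples only among the kept ids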
| performer-pytorch-main | performer_pytorch/autoregressive_wrapper.py |
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
def forward(self, x, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
for (f, g), (f_args, g_args) in layers_and_args:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}):
super().__init__()
self.args_route = args_route
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
| performer-pytorch-main | performer_pytorch/reversible.py |
from performer_pytorch.performer_pytorch import PerformerLM, Performer, FastAttention, SelfAttention, CrossAttention, ProjectionUpdater
from performer_pytorch.autoregressive_wrapper import AutoregressiveWrapper
from performer_pytorch.performer_enc_dec import PerformerEncDec
| performer-pytorch-main | performer_pytorch/__init__.py |
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import autocast
from einops import rearrange, repeat
from functools import partial
from contextlib import contextmanager
from local_attention import LocalAttention
from axial_positional_embedding import AxialPositionalEmbedding
from performer_pytorch.reversible import ReversibleSequence, SequentialSequence
from distutils.version import LooseVersion
TORCH_GE_1_8_0 = LooseVersion(torch.__version__) >= LooseVersion('1.8.0')
try:
from apex import amp
APEX_AVAILABLE = True
except:
APEX_AVAILABLE = False
# helpers
def exists(val):
return val is not None
def empty(tensor):
return tensor.numel() == 0
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
def cast_tuple(val):
return (val,) if not isinstance(val, tuple) else val
def get_module_device(module):
return next(module.parameters()).device
def find_modules(nn_module, type):
return [module for module in nn_module.modules() if isinstance(module, type)]
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, *args, **kwargs):
return self.val
# token shifting helper and classes
def shift(t, amount, mask = None):
if amount == 0:
return t
if exists(mask):
t = t.masked_fill(~mask[..., None], 0.)
return F.pad(t, (0, 0, amount, -amount), value = 0.)
class PreShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
mask = kwargs.get('mask', None)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
# kernel functions
# transcribed from jax to pytorch from
# https://github.com/google-research/google-research/blob/master/performer/fast_attention/jax/fast_attention.py
def softmax_kernel(data, *, projection_matrix, is_query, normalize_data=True, eps=1e-4, device = None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
ratio = (projection_matrix.shape[0] ** -0.5)
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
diag_data = data ** 2
diag_data = torch.sum(diag_data, dim=-1)
diag_data = (diag_data / 2.0) * (data_normalizer ** 2)
diag_data = diag_data.unsqueeze(dim=-1)
if is_query:
data_dash = ratio * (
torch.exp(data_dash - diag_data -
torch.amax(data_dash, dim=-1, keepdim=True).detach()) + eps)
else:
data_dash = ratio * (
torch.exp(data_dash - diag_data - torch.amax(data_dash, dim=(-1, -2), keepdim=True).detach()) + eps)
return data_dash.type_as(data)
def generalized_kernel(data, *, projection_matrix, kernel_fn = nn.ReLU(), kernel_epsilon = 0.001, normalize_data = True, device = None):
b, h, *_ = data.shape
data_normalizer = (data.shape[-1] ** -0.25) if normalize_data else 1.
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
projection = repeat(projection_matrix, 'j d -> b h j d', b = b, h = h)
projection = projection.type_as(data)
data_dash = torch.einsum('...id,...jd->...ij', (data_normalizer * data), projection)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime.type_as(data)
def orthogonal_matrix_chunk(cols, device = None):
unstructured_block = torch.randn((cols, cols), device = device)
if TORCH_GE_1_8_0:
q, r = torch.linalg.qr(unstructured_block.cpu(), mode = 'reduced')
else:
q, r = torch.qr(unstructured_block.cpu(), some = True)
q, r = map(lambda t: t.to(device), (q, r))
return q.t()
def gaussian_orthogonal_random_matrix(nb_rows, nb_columns, scaling = 0, device = None):
nb_full_blocks = int(nb_rows / nb_columns)
block_list = []
for _ in range(nb_full_blocks):
q = orthogonal_matrix_chunk(nb_columns, device = device)
block_list.append(q)
remaining_rows = nb_rows - nb_full_blocks * nb_columns
if remaining_rows > 0:
q = orthogonal_matrix_chunk(nb_columns, device = device)
block_list.append(q[:remaining_rows])
final_matrix = torch.cat(block_list)
if scaling == 0:
multiplier = torch.randn((nb_rows, nb_columns), device = device).norm(dim = 1)
elif scaling == 1:
multiplier = math.sqrt((float(nb_columns))) * torch.ones((nb_rows,), device = device)
else:
raise ValueError(f'Invalid scaling {scaling}')
return torch.diag(multiplier) @ final_matrix
# linear attention classes with softmax kernel
# non-causal linear attention
def linear_attention(q, k, v):
k_cumsum = k.sum(dim = -2)
D_inv = 1. / torch.einsum('...nd,...d->...n', q, k_cumsum.type_as(q))
context = torch.einsum('...nd,...ne->...de', k, v)
out = torch.einsum('...de,...nd,...n->...ne', context, q, D_inv)
return out
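# hedged usage sketch (illustrative only, not in the source repo): the FAVOR+ pipeline is
#   1. draw an orthogonal random projection of shape (nb_features, dim_head)
#   2. map q and k into non-negative kernel features with softmax_kernel
#   3. combine with the values in O(n) via linear_attention
# the helper below (a made-up name) only demonstrates the expected shapes and wiring
def _favor_plus_demo(b = 1, h = 2, n = 16, d = 32, m = 64, e = 32):
    proj = gaussian_orthogonal_random_matrix(m, d)                           # (m, d) random features
    q, k = torch.randn(b, h, n, d), torch.randn(b, h, n, d)
    v = torch.randn(b, h, n, e)
    q_prime = softmax_kernel(q, projection_matrix = proj, is_query = True)   # (b, h, n, m)
    k_prime = softmax_kernel(k, projection_matrix = proj, is_query = False)  # (b, h, n, m)
    return linear_attention(q_prime, k_prime, v)                             # (b, h, n, e)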
# efficient causal linear attention, created by EPFL
# TODO: rewrite EPFL's CUDA kernel to do mixed precision and remove half to float conversion and back
def causal_linear_attention(q, k, v, eps = 1e-6):
from fast_transformers.causal_product import CausalDotProduct
autocast_enabled = torch.is_autocast_enabled()
is_half = isinstance(q, torch.cuda.HalfTensor)
assert not is_half or APEX_AVAILABLE, 'half tensors can only be used if nvidia apex is available'
cuda_context = null_context if not autocast_enabled else partial(autocast, enabled = False)
causal_dot_product_fn = amp.float_function(CausalDotProduct.apply) if is_half else CausalDotProduct.apply
k_cumsum = k.cumsum(dim=-2) + eps
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q))
with cuda_context():
if autocast_enabled:
q, k, v = map(lambda t: t.float(), (q, k, v))
out = causal_dot_product_fn(q, k, v)
out = torch.einsum('...nd,...n->...nd', out, D_inv)
return out
# inefficient causal linear attention, without cuda code, for reader's reference
# not being used
def causal_linear_attention_noncuda(q, k, v, chunk_size = 128, eps = 1e-6):
last_k_cumsum = 0
last_context_cumsum = 0
outs = []
for q, k, v in zip(*map(lambda t: t.chunk(chunk_size, dim = -2), (q, k, v))):
k_cumsum = last_k_cumsum + k.cumsum(dim=-2)
D_inv = 1. / torch.einsum('...nd,...nd->...n', q, k_cumsum.type_as(q) + eps)
context = torch.einsum('...nd,...ne->...nde', k, v)
context_cumsum = last_context_cumsum + context.cumsum(dim=-3)
out = torch.einsum('...nde,...nd,...n->...ne', context_cumsum, q, D_inv)
last_k_cumsum = k_cumsum[:, :, -1:]
last_context_cumsum = context_cumsum[:, :, -1:]
outs.append(out)
return torch.cat(outs, dim = -2)
class FastAttention(nn.Module):
def __init__(self, dim_heads, nb_features = None, ortho_scaling = 0, causal = False, generalized_attention = False, kernel_fn = nn.ReLU(), no_projection = False):
super().__init__()
nb_features = default(nb_features, int(dim_heads * math.log(dim_heads)))
self.dim_heads = dim_heads
self.nb_features = nb_features
self.ortho_scaling = ortho_scaling
self.create_projection = partial(gaussian_orthogonal_random_matrix, nb_rows = self.nb_features, nb_columns = dim_heads, scaling = ortho_scaling)
projection_matrix = self.create_projection()
self.register_buffer('projection_matrix', projection_matrix)
self.generalized_attention = generalized_attention
self.kernel_fn = kernel_fn
# if this is turned on, no projection will be used
# queries and keys will be softmax-ed as in the original efficient attention paper
self.no_projection = no_projection
self.causal = causal
if causal:
try:
import fast_transformers.causal_product.causal_product_cuda
self.causal_linear_fn = partial(causal_linear_attention)
except ImportError:
print('unable to import cuda code for auto-regressive Performer. will default to the memory inefficient non-cuda version')
self.causal_linear_fn = causal_linear_attention_noncuda
@torch.no_grad()
def redraw_projection_matrix(self, device):
projections = self.create_projection(device = device)
self.projection_matrix.copy_(projections)
del projections
def forward(self, q, k, v):
device = q.device
if self.no_projection:
q = q.softmax(dim = -1)
k = torch.exp(k) if self.causal else k.softmax(dim = -2)
elif self.generalized_attention:
create_kernel = partial(generalized_kernel, kernel_fn = self.kernel_fn, projection_matrix = self.projection_matrix, device = device)
q, k = map(create_kernel, (q, k))
else:
create_kernel = partial(softmax_kernel, projection_matrix = self.projection_matrix, device = device)
q = create_kernel(q, is_query = True)
k = create_kernel(k, is_query = False)
attn_fn = linear_attention if not self.causal else self.causal_linear_fn
out = attn_fn(q, k, v)
return out
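# hedged usage sketch (illustrative only, not in the source repo): FastAttention is a drop-in
# attention function operating on (batch, heads, seq, dim_head) tensors; the demo name below is an assumption
def _fast_attention_demo():
    attn = FastAttention(dim_heads = 64, nb_features = 256, causal = False)
    q = k = v = torch.randn(1, 8, 512, 64)
    return attn(q, k, v)                                                     # (1, 8, 512, 64)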
# a module for keeping track of when to update the projections
class ProjectionUpdater(nn.Module):
def __init__(self, instance, feature_redraw_interval):
super().__init__()
self.instance = instance
self.feature_redraw_interval = feature_redraw_interval
self.register_buffer('calls_since_last_redraw', torch.tensor(0))
def fix_projections_(self):
self.feature_redraw_interval = None
def redraw_projections(self):
model = self.instance
if not self.training:
return
if exists(self.feature_redraw_interval) and self.calls_since_last_redraw >= self.feature_redraw_interval:
device = get_module_device(model)
fast_attentions = find_modules(model, FastAttention)
for fast_attention in fast_attentions:
fast_attention.redraw_projection_matrix(device)
self.calls_since_last_redraw.zero_()
return
self.calls_since_last_redraw += 1
def forward(self, x):
raise NotImplementedError
# classes
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.tensor(1e-3))
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.g
class PreScaleNorm(nn.Module):
def __init__(self, dim, fn, eps=1e-5):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x, **kwargs):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
x = x / n * self.g
return self.fn(x, **kwargs)
class PreLayerNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim = -1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x, **kwargs):
if self.chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, nn.GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
causal = False,
heads = 8,
dim_head = 64,
local_heads = 0,
local_window_size = 256,
nb_features = None,
feature_redraw_interval = 1000,
generalized_attention = False,
kernel_fn = nn.ReLU(),
dropout = 0.,
no_projection = False,
qkv_bias = False,
attn_out_bias = True
):
super().__init__()
assert dim % heads == 0, 'dimension must be divisible by number of heads'
dim_head = default(dim_head, dim // heads)
inner_dim = dim_head * heads
self.fast_attention = FastAttention(dim_head, nb_features, causal = causal, generalized_attention = generalized_attention, kernel_fn = kernel_fn, no_projection = no_projection)
self.heads = heads
self.global_heads = heads - local_heads
self.local_attn = LocalAttention(window_size = local_window_size, causal = causal, autopad = True, dropout = dropout, look_forward = int(not causal), rel_pos_emb_config = (dim_head, local_heads)) if local_heads > 0 else None
self.to_q = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_k = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_v = nn.Linear(dim, inner_dim, bias = qkv_bias)
self.to_out = nn.Linear(inner_dim, dim, bias = attn_out_bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x, pos_emb = None, context = None, mask = None, context_mask = None, **kwargs):
b, n, _, h, gh = *x.shape, self.heads, self.global_heads
cross_attend = exists(context)
context = default(context, x)
context_mask = default(context_mask, mask) if not cross_attend else context_mask
q, k, v = self.to_q(x), self.to_k(context), self.to_v(context)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
(q, lq), (k, lk), (v, lv) = map(lambda t: (t[:, :gh], t[:, gh:]), (q, k, v))
attn_outs = []
if not empty(q):
if exists(context_mask):
global_mask = context_mask[:, None, :, None]
v.masked_fill_(~global_mask, 0.)
if exists(pos_emb) and not cross_attend:
q, k = apply_rotary_pos_emb(q, k, pos_emb)
out = self.fast_attention(q, k, v)
attn_outs.append(out)
if not empty(lq):
assert not cross_attend, 'local attention is not compatible with cross attention'
out = self.local_attn(lq, lk, lv, input_mask = mask)
attn_outs.append(out)
out = torch.cat(attn_outs, dim = 1)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return self.dropout(out)
class SelfAttention(Attention):
def forward(self, *args, context = None, **kwargs):
assert not exists(context), 'self attention should not receive context'
return super().forward(*args, **kwargs)
class CrossAttention(Attention):
def forward(self, *args, context = None, **kwargs):
assert exists(context), 'cross attention should receive context'
return super().forward(*args, context = context, **kwargs)
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x):
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)
# rotary positional embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(q, k, sinu_pos):
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j = 2)
sin, cos = sinu_pos.unbind(dim = -2)
sin, cos = map(lambda t: repeat(t, 'b n -> b (n j)', j = 2), (sin, cos))
q, k = map(lambda t: (t * cos) + (rotate_every_two(t) * sin), (q, k))
return q, k
# sinusoidal positional embeddings
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
position = torch.arange(0, max_seq_len, dtype=torch.float)
sinusoid_inp = torch.einsum("i,j->ij", position, inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
self.register_buffer('emb', emb)
def forward(self, x):
return self.emb[None, :x.shape[1], :].to(x)
# performer
class Performer(nn.Module):
def __init__(
self,
dim,
depth,
heads,
dim_head,
local_attn_heads = 0,
local_window_size = 256,
causal = False,
ff_mult = 4,
nb_features = None,
feature_redraw_interval = 1000,
reversible = False,
ff_chunks = 1,
generalized_attention = False,
kernel_fn = nn.ReLU(),
use_scalenorm = False,
use_rezero = False,
ff_glu = False,
ff_dropout = 0.,
attn_dropout = 0.,
cross_attend = False,
no_projection = False,
auto_check_redraw = True,
qkv_bias = True,
attn_out_bias = True,
shift_tokens = False
):
super().__init__()
layers = nn.ModuleList([])
local_attn_heads = cast_tuple(local_attn_heads)
local_attn_heads = local_attn_heads * depth if len(local_attn_heads) == 1 else local_attn_heads
assert len(local_attn_heads) == depth, 'tuple specifying number of local attention heads per depth must be equal to the total depth'
assert all(map(lambda n: n >= 0 and n <= heads, local_attn_heads)), 'local attention head value must be less than or equal to the total number of heads'
if use_scalenorm:
wrapper_fn = partial(PreScaleNorm, dim)
elif use_rezero:
wrapper_fn = ReZero
else:
wrapper_fn = partial(PreLayerNorm, dim)
for _, local_heads in zip(range(depth), local_attn_heads):
attn = SelfAttention(dim, causal = causal, heads = heads, dim_head = dim_head, local_heads = local_heads, local_window_size = local_window_size, nb_features = nb_features, generalized_attention = generalized_attention, kernel_fn = kernel_fn, dropout = attn_dropout, no_projection = no_projection, qkv_bias = qkv_bias, attn_out_bias = attn_out_bias)
ff = Chunk(ff_chunks, FeedForward(dim, mult = ff_mult, dropout = ff_dropout, glu = ff_glu), along_dim = 1)
if shift_tokens:
shift = (0, 1) if causal else (-1, 0, 1)
attn, ff = map(lambda t: PreShiftTokens(shift, t), (attn, ff))
attn, ff = map(wrapper_fn, (attn, ff))
layers.append(nn.ModuleList([attn, ff]))
if not cross_attend:
continue
layers.append(nn.ModuleList([
wrapper_fn(CrossAttention(dim, heads = heads, dim_head = dim_head, nb_features = nb_features, generalized_attention = generalized_attention, kernel_fn = kernel_fn, dropout = attn_dropout, no_projection = no_projection, qkv_bias = qkv_bias, attn_out_bias = attn_out_bias)),
wrapper_fn(Chunk(ff_chunks, FeedForward(dim, mult = ff_mult, dropout = ff_dropout, glu = ff_glu), along_dim = 1))
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
route_attn = ((True, False),) * depth * (2 if cross_attend else 1)
route_context = ((False, False), (True, False)) * depth
attn_route_map = {'mask': route_attn, 'pos_emb': route_attn}
context_route_map = {'context': route_context, 'context_mask': route_context} if cross_attend else {}
self.net = execute_type(layers, args_route = {**attn_route_map, **context_route_map})
# keeping track of when to redraw projections for all attention layers
self.auto_check_redraw = auto_check_redraw
self.proj_updater = ProjectionUpdater(self.net, feature_redraw_interval)
def fix_projection_matrices_(self):
self.proj_updater.feature_redraw_interval = None
def forward(self, x, **kwargs):
if self.auto_check_redraw:
self.proj_updater.redraw_projections()
return self.net(x, **kwargs)
class PerformerLM(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
dim,
depth,
heads,
dim_head = 64,
local_attn_heads = 0,
local_window_size = 256,
causal = False,
ff_mult = 4,
nb_features = None,
feature_redraw_interval = 1000,
reversible = False,
ff_chunks = 1,
ff_glu = False,
emb_dropout = 0.,
ff_dropout = 0.,
attn_dropout = 0.,
generalized_attention = False,
kernel_fn = nn.ReLU(),
use_scalenorm = False,
use_rezero = False,
cross_attend = False,
no_projection = False,
tie_embed = False,
rotary_position_emb = True,
axial_position_emb = False,
axial_position_shape = None,
auto_check_redraw = True,
qkv_bias = False,
attn_out_bias = False,
shift_tokens = False
):
super().__init__()
local_attn_heads = cast_tuple(local_attn_heads)
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
if rotary_position_emb:
self.pos_emb = FixedPositionalEmbedding(dim, max_seq_len)
self.layer_pos_emb = FixedPositionalEmbedding(dim_head, max_seq_len)
elif axial_position_emb:
axial_position_shape = default(axial_position_shape, (math.ceil(max_seq_len / 64), 64))
self.pos_emb = AxialPositionalEmbedding(dim, axial_position_shape)
self.layer_pos_emb = Always(None)
else:
self.pos_emb = AbsolutePositionalEmbedding(dim, max_seq_len)
self.layer_pos_emb = Always(None)
self.dropout = nn.Dropout(emb_dropout)
self.performer = Performer(dim, depth, heads, dim_head, local_attn_heads, local_window_size, causal, ff_mult, nb_features, feature_redraw_interval, reversible, ff_chunks, generalized_attention, kernel_fn, use_scalenorm, use_rezero, ff_glu, ff_dropout, attn_dropout, cross_attend, no_projection, auto_check_redraw, qkv_bias, attn_out_bias, shift_tokens)
self.norm = nn.LayerNorm(dim)
self.to_out = nn.Linear(dim, num_tokens) if not tie_embed else None
def check_redraw_projections(self):
self.performer.proj_updater.redraw_projections()
def fix_projection_matrices_(self):
self.performer.fix_projection_matrices_()
def forward(self, x, return_encodings = False, **kwargs):
b, n, device = *x.shape, x.device
assert n <= self.max_seq_len, f'sequence length {n} must be at most the max sequence length {self.max_seq_len}'
# token and positional embeddings
x = self.token_emb(x)
x += self.pos_emb(x)
x = self.dropout(x)
# performer layers
layer_pos_emb = self.layer_pos_emb(x)
x = self.performer(x, pos_emb = layer_pos_emb, **kwargs)
# norm and to logits
x = self.norm(x)
if return_encodings:
return x
if exists(self.to_out):
return self.to_out(x)
return x @ self.token_emb.weight.t()
| performer-pytorch-main | performer_pytorch/performer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'PaLM-rlhf-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.1',
license='MIT',
description = 'PaLM + Reinforcement Learning with Human Feedback - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/PaLM-rlhf-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'reinforcement learning',
'human feedback'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6',
'lion-pytorch',
'torch>=1.6',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| PaLM-rlhf-pytorch-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from lion_pytorch import Lion
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from palm_rlhf_pytorch import PaLM
from accelerate import Accelerator
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# accelerator
accelerator = Accelerator()
device = accelerator.device
# instantiate palm
model = PaLM(
num_tokens=256,
dim=512,
depth=8,
flash_attn=True
).to(device)
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.to(device)
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Lion(model.palm_parameters(), lr = LEARNING_RATE)
model, optim, train_loader, val_loader = accelerator.prepare(
model, optim, train_loader, val_loader
)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
accelerator.backward(loss / GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"training loss: {loss.item()}")
accelerator.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
accelerator.print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
accelerator.print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(GENERATE_LENGTH, inp[None, ...])
output_str = decode_tokens(sample[0])
accelerator.print(output_str, "\n")
| PaLM-rlhf-pytorch-main | train.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attention(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
use_flash_attn = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash_attn = use_flash_attn
assert not (use_flash_attn and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash_attn:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash_attn:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
return out
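# hedged usage sketch (illustrative only, not in the source repo): queries carry a heads dimension
# while keys / values are shared across heads (multi-query attention); the demo name is an assumption
def _attention_demo():
    attend = Attention(causal = True, use_flash_attn = False)
    q = torch.randn(1, 8, 512, 64)   # (batch, heads, seq, dim_head)
    k = v = torch.randn(1, 512, 64)  # (batch, seq, dim_head) - single shared key / value head
    return attend(q, k, v)           # (1, 8, 512, 64)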
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/attention.py |
import math
import copy
from pathlib import Path
from collections import namedtuple
from functools import wraps
from itertools import zip_longest
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from palm_rlhf_pytorch.attention import Attention
from palm_rlhf_pytorch.utils import top_p, top_k, masked_mean, gumbel_sample, eval_decorator
from palm_rlhf_pytorch.lora import LoRA
# functions and decorators
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def identity(t, *args, **kwargs):
return t
def l2norm(t):
return F.normalize(t, dim = -1)
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
y = self.fn(x, **kwargs)
if not any([t.requires_grad for t in (x, y)]):
return x.add_(y)
return y + x
# rotary positional embedding w/ xpos
# https://arxiv.org/abs/2104.09864
# https://arxiv.org/abs/2212.10554v1
class RotaryEmbedding(nn.Module):
def __init__(self, dim, scale_base = 512, use_xpos = True):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.use_xpos = use_xpos
self.scale_base = scale_base
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not self.use_xpos:
return freqs, torch.ones(1, device = device)
power = (t - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t, scale = 1.):
return (t * pos.cos() * scale) + (rotate_half(t) * pos.sin() * scale)
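# hedged usage sketch (illustrative only, not in the source repo): queries are rotated with the xpos
# scale and keys with its inverse, so attention decays with relative distance; the demo name is an assumption
def _rotary_xpos_demo():
    rotary = RotaryEmbedding(dim = 64)
    q = torch.randn(1, 8, 128, 64)
    k = torch.randn(1, 8, 128, 64)
    pos, scale = rotary(128, device = q.device)
    q = apply_rotary_pos_emb(pos, q, scale)
    k = apply_rotary_pos_emb(pos, k, scale ** -1)
    return q, k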
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
causal = True,
heads = 8,
qk_rmsnorm = False,
qk_scale = 8,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
use_xpos = True,
xpos_scale_base = 512,
flash_attn = False,
):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.qk_rmsnorm = qk_rmsnorm
if qk_rmsnorm:
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.attend = Attention(
causal = causal,
dropout = attn_dropout,
use_flash_attn = flash_attn
)
self.heads = heads
self.scale = (dim_head ** -0.5) if not qk_rmsnorm else qk_scale
self.causal = causal
self.rotary_emb = RotaryEmbedding(dim_head, scale_base = xpos_scale_base, use_xpos = use_xpos and causal)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.flash_attn = flash_attn
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.attn_dropout = nn.Dropout(attn_dropout)
self.flash_attn_dropout = attn_dropout
# parallel feedforward tail
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Dropout(ff_dropout),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("pos_emb", None, persistent=False)
self.register_buffer("pos_emb_scale", None, persistent=False)
def get_rotary_embedding(self, n, device):
if exists(self.pos_emb) and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n], self.pos_emb_scale[:n]
pos_emb, scale = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
self.register_buffer("pos_emb_scale", scale, persistent=False)
return pos_emb, scale
def forward(
self,
x,
mask = None,
finetune_modules = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# finetune loras
lora_q = lora_k = lora_v = lora_o = None
if exists(finetune_modules):
lora_q, lora_k, lora_v, lora_o = finetune_modules
q = q + lora_q(x)
k = k + lora_k(x)
v = v + lora_v(x)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# qk rmsnorm
if self.qk_rmsnorm:
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# rotary embeddings with xpos decay for better length extrapolation
positions, scale = self.get_rotary_embedding(n, device)
q = apply_rotary_pos_emb(positions, q, scale)
k = apply_rotary_pos_emb(positions, k, scale ** -1)
# attention function, either regular or flash
out = self.attend(q, k, v, mask = mask)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
attn_out = self.attn_out(out)
ff_out = self.ff_out(ff)
if exists(lora_o):
attn_out = attn_out + lora_o(out)
return attn_out + ff_out
# transformer
@beartype
class PaLM(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
causal = True,
dim_head = 64,
heads = 8,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
qk_rmsnorm = False,
lora_r = 8,
rotary_xpos_scale_base = 512,
flash_attn = False,
finetune_scopes = tuple(),
cross_entropy_ignore_index = 0
):
super().__init__()
self.dim = dim
self.dim_head = dim_head
self.heads = heads
self.causal = causal
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_tokens, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
block = Residual(ParallelTransformerBlock(
dim = dim,
causal = causal,
dim_head = dim_head,
heads = heads,
qk_rmsnorm = qk_rmsnorm,
ff_mult = ff_mult,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
xpos_scale_base = rotary_xpos_scale_base,
flash_attn = flash_attn
))
self.layers.append(block)
self.norm = LayerNorm(dim)
self.to_logits = nn.Linear(dim, num_tokens, bias=False)
self.to_logits.weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
# fine tuning related
self.lora_r = lora_r
self.finetune_modules = nn.ModuleDict({})
for scope in finetune_scopes:
self.add_finetune_params(scope)
# loss related
self.cross_entropy_ignore_index = cross_entropy_ignore_index
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(str(path)))
def set_dropout(self, dropout):
for module in self.layers.modules():
if isinstance(module, nn.Dropout):
module.p = dropout
return self
def add_finetune_params(self, scope, lora_r = None):
assert scope not in self.finetune_modules, f'finetune scope {scope} already found'
dim, dim_head, heads, r, device = self.dim, self.dim_head, self.heads, default(lora_r, self.lora_r), self.device
q_inner_dim = heads * dim_head
kv_inner_dim = dim_head
lora_modules = nn.ModuleList([])
for _ in range(len(self.layers)):
lora_modules.append(nn.ModuleList([
LoRA(dim, q_inner_dim, r = r), # queries
LoRA(dim, kv_inner_dim, r = r), # keys
LoRA(dim, kv_inner_dim, r = r), # values
LoRA(q_inner_dim, dim, r = r) # wo
]))
self.finetune_modules[scope] = lora_modules.to(device)
def remove_finetune_params(self, scope):
assert scope in self.finetune_modules, f'finetune scope {scope} not found'
return self.finetune_modules.pop(scope)
@torch.no_grad()
def merge_finetune_params(self, scope):
""" in the case one wants to merge the fine-tuned actor LORA parameters and do multiple rounds of fine tuning off different reward models """
assert scope in self.finetune_modules, f'finetune scope {scope} not found'
lora_modules = self.finetune_modules.pop(scope)
for layer, (lora_q, lora_k, lora_v, lora_o) in zip(self.layers, lora_modules):
block = layer.fn
fused_attn_ff_weight = block.fused_attn_ff_proj.weight
attn_out_weight = block.attn_out.weight
fused_proj_out_dim = fused_attn_ff_weight.shape[0]
lora_qkv_weight, _ = pack([lora_q.weight, lora_k.weight, lora_v.weight], 'i *')
lora_qkv_weight = F.pad(lora_qkv_weight, (0, fused_proj_out_dim - lora_qkv_weight.shape[1]))
lora_qkv_weight = rearrange(lora_qkv_weight, 'i o -> o i')
lora_o_weight = rearrange(lora_o.weight, 'i o -> o i')
fused_attn_ff_weight.add_(lora_qkv_weight)
attn_out_weight.add_(lora_o_weight)
# researchers train the palm parameters first
# before finetuning
def palm_parameters(self):
return set(self.parameters()) - set(self.finetune_modules.parameters())
def finetune_parameters(self, scope = 'default'):
assert scope in self.finetune_modules, f'finetune parameters of scope {scope} not found'
return self.finetune_modules[scope].parameters()
# generate function
@torch.no_grad()
@eval_decorator
def generate(
self,
seq_len,
prompt = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
pad_value = 0.,
eos_token = None,
return_seq_without_prompt = True,
use_tqdm = False,
**kwargs
):
if not exists(prompt):
prompt = torch.randint(0, self.num_tokens, (1, 1))
prompt = prompt.to(self.device)
return_seq_without_prompt = False
prompt, leading_dims = pack([prompt], '* n')
n, out = prompt.shape[-1], prompt.clone()
wrapper_fn = identity if not use_tqdm else tqdm
sample_num_times = max(1, seq_len - prompt.shape[-1])
for _ in wrapper_fn(range(sample_num_times)):
logits, embeds = self.forward(out, return_logits_with_embedding = True, **kwargs)
logits, embeds = logits[:, -1], embeds[:, -1]
if exists(filter_logits_fn):
logits = filter_logits_fn(logits, thres = filter_thres)
sample = gumbel_sample(logits, temperature = temperature, dim = -1)
out, _ = pack([out, sample], 'b *')
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, pad_value)
break
out, = unpack(out, leading_dims, '* n')
if not return_seq_without_prompt:
return out
return out[..., n:]
def forward(
self,
x,
return_loss = False,
disable_lora = False,
finetune_scope = None,
extra_embed = None,
return_only_embedding = False,
return_logits_with_embedding = False
):
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
# mask if encoder
# treat any token ids that are negative as tokens to mask out - only needed if not autoregressive
if not self.causal:
mask = x >= 0
x = x.masked_fill(~mask, 0)
else:
mask = None
# get token embedding
x = self.token_emb(x)
if exists(extra_embed):
x = x + extra_embed
# finetune modules
finetune_modules = tuple()
if exists(finetune_scope) and not disable_lora:
assert finetune_scope in self.finetune_modules
finetune_modules = self.finetune_modules[finetune_scope]
# parallel attention / ff blocks, passing in finetuning loras
for layer, finetune_modules in zip_longest(self.layers, finetune_modules):
x = layer(x, mask = mask, finetune_modules = finetune_modules)
# final norm
embeds = self.norm(x)
if return_only_embedding:
return embeds
# to logits
logits = self.to_logits(embeds)
ret = (logits, embeds) if return_logits_with_embedding else logits
if not return_loss:
return ret
logits = rearrange(logits, 'b n c -> b c n')
return F.cross_entropy(logits, labels, ignore_index = self.cross_entropy_ignore_index)
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/palm.py |
from palm_rlhf_pytorch.palm import PaLM
from palm_rlhf_pytorch.reward import RewardModel
from palm_rlhf_pytorch.ppo import RLHFTrainer, ActorCritic
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/__init__.py |
import math
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
# decorators
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# tensor helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def masked_mean(seq, mask = None, dim = 1, keepdim = False):
if not exists(mask):
return seq.mean(dim = dim)
if seq.ndim == 3:
mask = rearrange(mask, 'b n -> b n 1')
masked_seq = seq.masked_fill(~mask, 0.)
numer = masked_seq.sum(dim = dim, keepdim = keepdim)
denom = mask.sum(dim = dim, keepdim = keepdim)
masked_mean = numer / denom.clamp(min = 1e-3)
masked_mean = masked_mean.masked_fill(denom == 0, 0.)
return masked_mean
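# hedged illustration (not in the source repo):
#   seq  = torch.ones(1, 4, 8)
#   mask = torch.tensor([[True, True, False, False]])
#   masked_mean(seq, mask)    # -> (1, 8) tensor of ones; masked-out positions do not dilute the mean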
# sampling helpers
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
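# hedged usage sketch (illustrative only, not in the source repo): filter the logits down to the top
# ~10% of tokens, then sample with gumbel noise at a chosen temperature; the demo name is an assumption
def _sampling_demo():
    logits = torch.randn(2, 256)              # (batch, vocab)
    filtered = top_k(logits, thres = 0.9)     # keeps ceil(0.1 * 256) = 26 logits per row
    return gumbel_sample(filtered, temperature = 0.8)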
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/utils.py |
from torch.optim import AdamW, Adam
from lion_pytorch import Lion
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
use_lion = True,
**kwargs
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if group_wd_params and wd > 0:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
if use_lion:
return Lion(params, lr = lr, betas = betas, weight_decay = wd)
if wd == 0:
return Adam(params, lr = lr, betas = betas, eps = eps)
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
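# hedged usage sketch (illustrative only, not in the source repo): parameters with ndim < 2 (biases,
# gains) are excluded from weight decay, and Lion is used unless use_lion = False; `model` is an assumed argument
def _optimizer_demo(model):
    return get_optimizer(model.parameters(), lr = 1e-4, wd = 1e-2, use_lion = True)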
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/optimizer.py |
import torch
from torch import nn
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# LoRA - https://arxiv.org/abs/2106.09685
class LoRA(nn.Module):
def __init__(
self,
dim,
dim_out,
r = 8,
alpha = None
):
super().__init__()
alpha = default(alpha, r)
self.scale = alpha / r
self.A = nn.Parameter(torch.randn(dim, r))
self.B = nn.Parameter(torch.zeros(r, dim_out))
@property
def weight(self):
return (self.A @ self.B) * self.scale
def forward(self, x):
return x @ self.weight
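# hedged usage sketch (illustrative only, not in the source repo): the low-rank update is added to the
# output of a frozen base projection, so only A and B need gradients; the demo name is an assumption
def _lora_demo():
    base = nn.Linear(512, 512, bias = False)
    lora = LoRA(512, 512, r = 8)
    x = torch.randn(1, 1024, 512)
    return base(x) + lora(x)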
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/lora.py |
import math
from pathlib import Path
import copy
from tqdm import tqdm
from functools import partial
from collections import deque, namedtuple
from random import randrange
from beartype import beartype
from beartype.typing import List, Optional, Callable, Deque
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from palm_rlhf_pytorch.palm import PaLM
from palm_rlhf_pytorch.reward import RewardModel
from palm_rlhf_pytorch.optimizer import get_optimizer
from palm_rlhf_pytorch.utils import masked_mean, eval_decorator
from accelerate import Accelerator
# actor critic - PaLM with lora
PPOActionCriticReturn = namedtuple('PPOActionCriticReturn', [
'actions',
'sequence',
'mask',
'prompt_mask',
'action_logits',
'values'
])
@beartype
class ActorCritic(nn.Module):
def __init__(
self,
palm: PaLM,
critic_palm: Optional[PaLM] = None,
pooled_values = False,
actor_lora = True,
critic_lora = True,
actor_lora_r = 8,
critic_lora_r = 8,
actor_lora_scope = 'actor',
critic_lora_scope = 'critic',
actor_dropout = 0.,
critic_dropout = 0.
):
super().__init__()
self.actor_palm = palm
self.critic_palm = critic_palm
if not exists(self.critic_palm):
self.critic_palm = copy.deepcopy(palm)
self.actor_palm.set_dropout(actor_dropout)
self.critic_palm.set_dropout(critic_dropout)
self.actor_lora = actor_lora
self.critic_lora = critic_lora
self.actor_lora_scope = actor_lora_scope if actor_lora else None
self.critic_lora_scope = critic_lora_scope if critic_lora else None
if self.actor_lora:
self.actor_palm.add_finetune_params(actor_lora_scope, lora_r = actor_lora_r)
if self.critic_lora:
self.critic_palm.add_finetune_params(critic_lora_scope, lora_r = critic_lora_r)
self.pooled_values = pooled_values
self.value_head = nn.Sequential(
nn.Linear(palm.dim, 1),
Rearrange('... 1 -> ...')
)
nn.init.zeros_(self.value_head[0].bias)
nn.init.orthogonal_(self.value_head[0].weight, gain = math.sqrt(2))
def actor_parameters(self):
if not self.actor_lora:
return self.actor_palm.parameters()
return [
*self.actor_palm.finetune_parameters(self.actor_lora_scope)
]
def critic_parameters(self):
if not self.actor_lora:
return [*self.critic_palm.parameters(), *self.value_head.parameters()]
return [
*self.critic_palm.finetune_parameters(self.critic_lora_scope),
*self.value_head.parameters()
]
@torch.no_grad()
@eval_decorator
def generate(
self,
state,
max_seq_len,
eos_token = None,
return_values = False,
**kwargs
):
actions = self.actor_palm.generate(
max_seq_len,
prompt = state,
eos_token = eos_token,
finetune_scope = self.actor_lora_scope,
use_tqdm = True,
**kwargs
)
sequence = torch.cat((state, actions), dim = -1)
action_len = actions.shape[-1]
state_len = state.shape[-1]
prompt_mask = torch.arange(sequence.shape[-1], device = state.device) < state_len
prompt_mask = repeat(prompt_mask, 'n -> b n', b = sequence.shape[0])
action_mask = ~prompt_mask
mask = None
if exists(eos_token):
mask = ((sequence == eos_token).cumsum(dim = -1) == 0)
mask = F.pad(mask, (1, -1), value = True) # include eos token
action_mask &= mask
action_logits, value = self.forward(
sequence,
mask = action_mask,
return_values = return_values
)
return PPOActionCriticReturn(
actions,
sequence,
mask,
prompt_mask,
action_logits,
value
)
def forward(
self,
x,
mask = None,
return_values = True
):
action_logits = self.actor_palm(
x,
finetune_scope = self.actor_lora_scope
)
if not return_values:
return action_logits, None
critic_embeds = self.critic_palm(
x,
return_only_embedding = True,
finetune_scope = self.critic_lora_scope
)
if self.pooled_values:
critic_embeds = shift(critic_embeds, shift = 1, dim = -2)
critic_embeds = masked_mean(critic_embeds, mask, dim = 1)
values = self.value_head(critic_embeds)
return action_logits, values
# data
Memory = namedtuple('Memory', [
'sequence',
'prompt_mask',
'mask',
'action_prob',
'action_log_prob',
'reward',
'value'
])
@beartype
class ExperienceDataset(Dataset):
def __init__(
self,
data: List[torch.Tensor],
device = None
):
super().__init__()
self.data = data
self.device = device
def __len__(self):
return self.data[0].shape[0]
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind].to(self.device), self.data))
def create_dataloader(data, batch_size, shuffle = True, device = None, **kwargs):
ds = ExperienceDataset(data, device = device)
return DataLoader(ds, batch_size = batch_size, shuffle = shuffle, **kwargs)
# helper functions
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def masked_normalize(t, eps = 1e-5, mask = None, dim = None):
dim = default(dim, tuple(range(t.ndim)))
kwargs = dict(dim = dim, keepdim = True)
mean = masked_mean(t, mask = mask, **kwargs)
mean_centered = t - mean
var = masked_mean(mean_centered ** 2, mask = mask, **kwargs)
return mean_centered * var.clamp(min = eps).rsqrt()
def pad_sequence_fixed(sequences, *args, **kwargs):
first_el = sequences[0]
has_no_dimension = first_el.ndim == 0
# if no dimensions, add a single dimension
if has_no_dimension:
sequences = tuple(map(lambda t: t[None], sequences))
out = pad_sequence(sequences, *args, **kwargs)
if has_no_dimension:
out = rearrange(out, '... 1 -> ...')
return out
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def log_prob(prob, indices):
assert prob.shape[:2] == indices.shape, f'preceding shapes of prob {prob.shape[:2]} and indices {indices.shape} must match'
return log(prob.gather(-1, indices[..., None])).squeeze(-1)
def shift(t, value = 0, shift = 1, dim = -1):
zeros = (0, 0) * (-dim - 1)
return F.pad(t, (*zeros, shift, -shift), value = value)
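# hedged illustration (not in the source repo): shift pads the front of `dim` and drops the same amount
# from the back, e.g. with dim = -1
#   shift(torch.tensor([[1, 2, 3]]), shift = 1) -> tensor([[0, 1, 2]])
# below it is used with dim = -2 to align each position's logits with the token it predicts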
def masked_entropy(prob, dim = -1, mask = None):
entropies = (prob * log(prob)).sum(dim = -1)
return masked_mean(entropies, mask = mask).mean()
def masked_kl_div(prob1, prob2, mask = None, reduce_batch = False):
"""
need to account for variable sequence lengths, therefore not using the built-in functional version
"""
kl_divs = (prob1 * (log(prob1) - log(prob2))).sum(dim = -1)
loss = masked_mean(kl_divs, mask)
if reduce_batch:
return loss.mean()
return loss
def clipped_value_loss(values, rewards, old_values, clip):
value_clipped = old_values + (values - old_values).clamp(-clip, clip)
value_loss_1 = (value_clipped.flatten() - rewards) ** 2
value_loss_2 = (values.flatten() - rewards) ** 2
return torch.mean(torch.max(value_loss_1, value_loss_2))
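# hedged illustration (assumed toy numbers, not in the source repo):
#   clipped_value_loss(values = torch.tensor([1.0]), rewards = torch.tensor([0.0]),
#                      old_values = torch.tensor([0.2]), clip = 0.4)
# clamps the new value prediction to old_values +/- clip (here 0.6) and takes the worse of the clipped /
# unclipped squared errors, so the critic cannot move too far from its rollout-time estimate in one update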
# rlhf trainer
@beartype
class RLHFTrainer(nn.Module):
def __init__(
self,
*,
prompts: Optional[List[str]] = None,
prompts_path: Optional[str] = None,
prompt_token_ids: Optional[torch.Tensor] = None,
tokenizer: Callable = None,
palm: PaLM,
reward_model: RewardModel,
critic_palm: Optional[PaLM] = None,
actor_critic: Optional[ActorCritic] = None,
actor_lr = 1e-4,
critic_lr = 1e-4,
actor_wd = 0.,
critic_wd = 0.,
actor_adam_eps = 1e-7,
critic_adam_eps = 1e-7,
actor_lora = True,
critic_lora = True,
actor_lora_r = 8,
critic_lora_r = 8,
critic_pooled_values = True,
actor_dropout = 0.,
critic_dropout = 0.,
betas = (0.9, 0.999),
max_norm = None,
eps_clip = 0.2,
value_clip = 0.4,
beta_s = .01,
pad_value = 0.,
minibatch_size = 16,
epochs = 1,
kl_div_loss_weight = 0.1, # between old action probs and new action probs - not sure what the right value is
accelerate_kwargs: dict = {},
use_lion = False
):
super().__init__()
self.accelerate = Accelerator(**accelerate_kwargs)
# take care of prompts -> token ids
assert (exists(prompts) + exists(prompts_path) + exists(prompt_token_ids)) == 1
if exists(prompts_path):
path = Path(prompts_path)
prompts = path.read_text().split('\n')
if exists(prompts):
assert len(prompts) > 0, 'no prompts'
assert exists(tokenizer), 'tokenizer must be passed in if raw text prompts are given'
prompt_token_ids = tokenizer(prompts)
self.pad_value = pad_value # token pad value
self.num_prompts = prompt_token_ids.shape[0]
self.register_buffer('prompt_token_ids', prompt_token_ids)
# models
self.palm = palm
if not exists(actor_critic):
actor_critic = ActorCritic(
palm = palm,
critic_palm = critic_palm,
actor_lora = actor_lora,
critic_lora = critic_lora,
actor_lora_r = actor_lora_r,
critic_lora_r = critic_lora_r,
pooled_values = critic_pooled_values,
actor_dropout = actor_dropout,
critic_dropout = critic_dropout
).to(palm.device)
self.actor_critic = actor_critic
self.reward_model = reward_model.eval()
# train hyperparameters
self.epochs = epochs
self.minibatch_size = minibatch_size
self.max_norm = max_norm
self.kl_div_loss_weight = kl_div_loss_weight
# optimizers
self.actor_optim = get_optimizer(actor_critic.actor_parameters(), lr = actor_lr, wd = actor_wd, betas = betas, eps = actor_adam_eps, use_lion = use_lion)
self.critic_optim = get_optimizer(actor_critic.critic_parameters(), lr = critic_lr, wd = critic_wd, betas = betas, eps = critic_adam_eps, use_lion = use_lion)
# ppo hyperparams
self.eps_clip = eps_clip
self.value_clip = value_clip
self.beta_s = beta_s
# prepare with accelerator
(
self.actor_critic,
self.reward_model,
self.actor_optim,
self.critic_optim
) = self.accelerate.prepare(
self.actor_critic,
self.reward_model,
self.actor_optim,
self.critic_optim
)
def print(self, msg):
return self.accelerate.print(msg)
def save(self, filepath = './checkpoint.pt'):
torch.save(self.actor_critic.state_dict(), filepath)
def load(self, filepath = './checkpoint.pt'):
state_dict = torch.load(filepath)
self.actor_critic.load_state_dict(state_dict)
@property
def device(self):
return self.accelerate.device
@torch.no_grad()
def generate(
self,
max_seq_len,
*args,
prompt,
num_samples = 4, # sample 4 per prompt and select the one with highest reward
**kwargs
):
assert prompt.ndim == 1, 'only one prompt allowed at a time for now'
prompt = repeat(prompt, 'n -> b n', b = num_samples)
actor_critic = self.accelerate.unwrap_model(self.actor_critic)
reward_model = self.accelerate.unwrap_model(self.reward_model)
actor_critic.eval()
(
actions,
sequences,
mask,
prompt_mask,
action_logits,
_
) = actor_critic.generate(
prompt,
*args,
max_seq_len = max_seq_len,
return_values = False,
**kwargs
)
rewards = reward_model(
sequences,
prompt_mask = prompt_mask,
mask = mask,
sample = True
)
best_sequence_index = rewards.topk(1, dim = -1).indices
best_sequence = sequences[best_sequence_index]
best_sequence = rearrange(best_sequence, '1 ... -> ...')
return best_sequence
def learn(
self,
memories: Deque[Memory]
):
# stack all data stored in the memories
all_memories_stacked_and_padded = list(map(partial(pad_sequence_fixed, batch_first = True), zip(*memories)))
# prepare dataloader for policy phase training
dl = create_dataloader(all_memories_stacked_and_padded, self.minibatch_size, device = self.device)
self.actor_critic.train()
# PPO training
for _ in range(self.epochs):
for (
sequences,
prompt_masks,
masks,
old_action_probs,
old_log_probs,
rewards,
old_values
) in dl:
action_masks = ~prompt_masks & masks
action_logits, values = self.actor_critic(
sequences,
mask = action_masks
)
action_logits = shift(action_logits, shift = 1, dim = -2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token
action_len = old_log_probs.shape[-1]
action_probs = action_logits.softmax(dim = -1)
action_log_probs = log_prob(action_probs, sequences)
action_log_probs = action_log_probs[:, -action_len:]
# calculate entropies, taking into account which part of the sequence is actually an action
entropies = masked_entropy(action_probs, mask = action_masks)
# calculate kl div between old action probs and new ones, taking into account which part of the sequence is action or not
kl_penalty = 0.
if self.kl_div_loss_weight > 0:
kl_penalty = masked_kl_div(old_action_probs, action_probs, mask = action_masks) * self.kl_div_loss_weight
# subtract the kl penalty from the rewards
rewards = rewards - kl_penalty
# handle non-pooled values
normalize_kwargs = dict()
if old_values.ndim == 2:
old_values, values = map(lambda t: shift(t, shift = 1, dim = -2), (old_values, values))
old_values = old_values[:, -action_len:]
values = values[:, -action_len:]
rewards = rearrange(rewards, 'b -> b 1')
normalize_kwargs = dict(dim = -1, mask = action_masks[:, -action_len:])
if values.ndim < rewards.ndim:
values = rearrange(values, '... -> ... 1')
# calculate clipped surrogate objective, classic PPO loss
ratios = (action_log_probs - old_log_probs).exp()
advantages = masked_normalize(rewards - old_values, **normalize_kwargs)
if advantages.ndim == 1:
advantages = rearrange(advantages, 'b -> b 1')
surr1 = ratios * advantages
surr2 = ratios.clamp(1 - self.eps_clip, 1 + self.eps_clip) * advantages
policy_loss = - torch.min(surr1, surr2) - self.beta_s * entropies
# combine losses
loss = policy_loss.mean()
# update actor
self.accelerate.backward(loss)
self.print(f'policy_loss: {loss.item():.3f}')
if exists(self.max_norm):
self.accelerate.clip_grad_norm_(self.actor_critic.actor_parameters(), self.max_norm)
self.actor_optim.step()
self.actor_optim.zero_grad()
# calculate value loss and update value network separate from policy network
value_loss = clipped_value_loss(values, rewards.detach(), old_values, self.value_clip)
value_loss = value_loss.mean()
self.print(f'critic_loss: {value_loss.item():.3f}')
self.accelerate.backward(value_loss)
if exists(self.max_norm):
self.accelerate.clip_grad_norm_(self.actor_critic.critic_parameters(), self.max_norm)
self.critic_optim.step()
self.critic_optim.zero_grad()
def train(
self,
num_episodes = 50000,
max_timesteps = 500,
update_timesteps = 5000,
max_batch_size = 16,
max_seq_len = 2048,
eos_token = None,
temperature = 1.
):
device = self.device
time = 0
memories = deque([])
for eps in tqdm(range(num_episodes), desc = 'episodes'):
for timestep in range(max_timesteps):
time += 1
# select a bunch of random states (prompts)
# and get the action (sampled sequence from palm as well as the action probs)
# also calculate the reward using reward model and store
rand_prompt_index = randrange(0, self.num_prompts)
state = self.prompt_token_ids[rand_prompt_index]
# remove padding from state
state_mask = state != self.pad_value
state = state[state_mask]
# get predicted sequence
(
actions,
sequence,
mask,
prompt_mask,
action_logits,
value
) = self.actor_critic.generate(
rearrange(state, 'n -> 1 n'),
max_seq_len = max_seq_len,
eos_token = eos_token,
temperature = temperature,
return_values = True
)
action_logits = shift(action_logits, shift = 1, dim = -2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token
action_prob = action_logits.softmax(dim = -1)
action_len = actions.shape[-1]
action_log_prob = log_prob(action_prob, sequence)
action_log_prob = action_log_prob[:, -action_len:]
actions = rearrange(actions, '1 ... -> ...')
# get reward as given by supervised trained reward model
sequence = torch.cat((state, actions), dim = 0)
prompt_length = len(state)
prompt_mask = torch.arange(sequence.shape[-1], device = device) < prompt_length
sequence = rearrange(sequence, 'n -> 1 n')
prompt_mask = rearrange(prompt_mask, 'n -> 1 n')
mask = default(mask, lambda: torch.ones(sequence.shape, dtype = torch.bool, device = device))
reward = self.reward_model(
sequence,
prompt_mask = prompt_mask,
mask = mask,
sample = True
)
detach_to_cpu_ = lambda t: rearrange(t.detach().cpu(), '1 ... -> ...')
# store memory for learning
memories.append(Memory(*map(detach_to_cpu_, (
sequence,
prompt_mask,
mask,
action_prob,
action_log_prob,
reward,
value
))))
# learn from the stored memories
if time % update_timesteps == 0:
self.learn(memories)
memories.clear()
print('rlhf training complete')
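# The clipped surrogate objective used in `learn` above, isolated as a minimal
# self-contained sketch with hypothetical toy tensors (not tied to any trainer
# state): the probability ratio between new and old policies is clipped to
# [1 - eps, 1 + eps] and the pessimistic minimum of the two surrogates is taken.

if __name__ == '__main__':
    import torch

    eps_clip = 0.2
    old_log_probs = torch.randn(4, 8)                            # log probs under the old (behavior) policy
    new_log_probs = old_log_probs + 0.05 * torch.randn(4, 8)     # log probs under the updated policy
    advantages = torch.randn(4, 8)                               # normalized advantages

    ratios = (new_log_probs - old_log_probs).exp()
    surr1 = ratios * advantages
    surr2 = ratios.clamp(1 - eps_clip, 1 + eps_clip) * advantages
    policy_loss = -torch.min(surr1, surr2).mean()
    print(f'toy policy loss: {policy_loss.item():.3f}')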
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/ppo.py |
import copy
from pathlib import Path
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from palm_rlhf_pytorch.utils import masked_mean, gumbel_sample
from palm_rlhf_pytorch.palm import PaLM
# helper functions
def exists(val):
return val is not None
# Reward Model - PaLM with a scalar head
@beartype
class RewardModel(nn.Module):
def __init__(
self,
palm: PaLM,
dropout = 0.1,
num_binned_output = 0,
use_lora = True,
lora_r = 8,
reward_lora_scope = 'reward',
):
super().__init__()
self.palm = copy.deepcopy(palm)
self.palm.set_dropout(dropout)
self.reward_lora_scope = reward_lora_scope if use_lora else None
if exists(self.reward_lora_scope):
self.palm.add_finetune_params(reward_lora_scope, lora_r = lora_r)
dim = palm.dim
self.binned_output = num_binned_output > 1
self.prompt_embed = nn.Parameter(torch.zeros(1, 1, dim))
self.response_embed = nn.Parameter(torch.zeros(1, 1, dim))
if self.binned_output:
self.to_pred = nn.Linear(dim, num_binned_output)
else:
self.to_pred = nn.Sequential(
nn.Linear(dim, 1, bias = False),
Rearrange('... 1 -> ...')
)
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(str(path)))
def finetune_parameters(self):
return [
*self.to_pred.parameters(),
*(self.palm.finetune_parameters(self.reward_lora_scope) if exists(self.reward_lora_scope) else self.palm.parameters())
]
def forward(
self,
x,
mask = None,
prompt_mask = None,
prompt_lengths = None,
labels = None,
sample = False,
sample_temperature = 1.,
disable_lora = False
):
assert not (exists(prompt_mask) and exists(prompt_lengths))
# derive prompt mask from prompt lengths
if exists(prompt_lengths):
batch, seq_len = x.shape
arange = torch.arange(seq_len, device = x.device)
prompt_mask = repeat(arange, 'n -> b n', b = batch) < rearrange(prompt_lengths, 'b -> b 1')
# reward model should have an understanding of which section is prompt, and which section is response
extra_embed = None
if exists(prompt_mask):
extra_embed = torch.where(
rearrange(prompt_mask, 'b n -> b n 1'),
self.prompt_embed,
self.response_embed
)
# get embeddings from palm
embeds = self.palm(
x,
extra_embed = extra_embed,
return_only_embedding = True,
disable_lora = disable_lora,
finetune_scope = self.reward_lora_scope
)
pooled = masked_mean(embeds, mask, dim = 1)
pred = self.to_pred(pooled)
if sample and self.binned_output:
assert not exists(labels)
pred = gumbel_sample(pred, temperature = sample_temperature, dim = -1)
if not exists(labels):
return pred
if not self.binned_output:
return F.mse_loss(pred, labels)
return F.cross_entropy(pred, labels)
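# Minimal usage sketch with hypothetical shapes and hyperparameters, assuming the
# usual `PaLM(num_tokens, dim, depth)` keyword arguments from this repository:
# score a batch of token sequences, marking the prompt portion either with an
# explicit `prompt_mask` or, as here, with per-sample `prompt_lengths`.

if __name__ == '__main__':
    palm = PaLM(num_tokens = 20000, dim = 512, depth = 2)
    reward_model = RewardModel(palm)

    seq = torch.randint(0, 20000, (2, 128))
    prompt_lengths = torch.tensor([32, 64])

    reward = reward_model(seq, prompt_lengths = prompt_lengths)    # shape (2,), one scalar reward per sequence

    # passing float `labels` instead returns a mean-squared-error training loss
    loss = reward_model(seq, prompt_lengths = prompt_lengths, labels = torch.randn(2))
    loss.backward()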
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/reward.py |
from setuptools import setup, find_packages
setup(
name = 'lion-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.2',
license='MIT',
description = 'Lion Optimizer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/lion-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'optimizers'
],
install_requires=[
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| lion-pytorch-main | setup.py |
import torch
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton -U --pre`')
exit()
# clone param and exp_avg before autotuning takes place
# as those are updated in-place
def clone_inplace_updated_params(nargs):
nargs['p_ptr'] = nargs['p_ptr'].clone()
nargs['exp_avg_ptr'] = nargs['exp_avg_ptr'].clone()
# triton cuda kernel
@triton.autotune(configs = [
triton.Config({'BLOCK_SIZE': 128}, num_warps = 4, pre_hook = clone_inplace_updated_params),
triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8, pre_hook = clone_inplace_updated_params),
], key = ['n_elements'])
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
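# Minimal usage sketch with hypothetical tensors (requires a CUDA device with
# triton installed): the kernel updates `p` and `exp_avg` in-place with the Lion
# rule, mirroring the pure-pytorch `update_fn` in lion_pytorch.py.

if __name__ == '__main__':
    p = torch.randn(4096, device = 'cuda')
    grad = torch.randn_like(p)
    exp_avg = torch.zeros_like(p)

    update_fn(p, grad, exp_avg, lr = 1e-4, wd = 1e-2, beta1 = 0.9, beta2 = 0.99)
    print('updated param mean:', p.mean().item())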
| lion-pytorch-main | lion_pytorch/triton.py |
from typing import Tuple, Optional, Callable
import torch
from torch.optim.optimizer import Optimizer
# functions
def exists(val):
return val is not None
# update functions
def update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):
# stepweight decay
p.data.mul_(1 - lr * wd)
# weight update
update = exp_avg.clone().mul_(beta1).add(grad, alpha = 1 - beta1).sign_()
p.add_(update, alpha = -lr)
# decay the momentum running average coefficient
exp_avg.mul_(beta2).add_(grad, alpha = 1 - beta2)
# class
class Lion(Optimizer):
def __init__(
self,
params,
lr: float = 1e-4,
betas: Tuple[float, float] = (0.9, 0.99),
weight_decay: float = 0.0,
use_triton: bool = False
):
assert lr > 0.
assert all([0. <= beta <= 1. for beta in betas])
defaults = dict(
lr = lr,
betas = betas,
weight_decay = weight_decay
)
super().__init__(params, defaults)
self.update_fn = update_fn
if use_triton:
from lion_pytorch.triton import update_fn as triton_update_fn
self.update_fn = triton_update_fn
@torch.no_grad()
def step(
self,
closure: Optional[Callable] = None
):
loss = None
if exists(closure):
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in filter(lambda p: exists(p.grad), group['params']):
grad, lr, wd, beta1, beta2, state = p.grad, group['lr'], group['weight_decay'], *group['betas'], self.state[p]
# init state - exponential moving average of gradient values
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
exp_avg = state['exp_avg']
self.update_fn(
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2
)
return loss
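# Minimal usage sketch with a hypothetical model and data: Lion is used as a
# drop-in replacement for Adam/AdamW, typically with a smaller learning rate and
# a larger weight decay than one would use for Adam.

if __name__ == '__main__':
    model = torch.nn.Linear(10, 1)
    opt = Lion(model.parameters(), lr = 1e-4, weight_decay = 1e-2)

    loss = model(torch.randn(8, 10)).pow(2).mean()
    loss.backward()
    opt.step()
    opt.zero_grad()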
| lion-pytorch-main | lion_pytorch/lion_pytorch.py |
from lion_pytorch.lion_pytorch import Lion
| lion-pytorch-main | lion_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'CoCa-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.12',
license='MIT',
description = 'CoCa, Contrastive Captioners are Image-Text Foundation Models - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/CoCa-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'contrastive learning',
'multimodal'
],
install_requires=[
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| CoCa-pytorch-main | setup.py |
from coca_pytorch.coca_pytorch import CoCa
| CoCa-pytorch-main | coca_pytorch/__init__.py |
import torch
from torch import einsum, nn
import torch.nn.functional as F
from torch.autograd import Function
import torch.distributed as dist
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
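# `pad_dim_to` is used by `all_gather_variable_batch` below but is not defined in
# this file - the following is a plausible sketch of the missing helper (an
# assumption): right-pad a tensor with zeros along one dimension up to `length`.

def pad_dim_to(t, length, dim = 0):
    pad_length = length - t.shape[dim]
    zero_pairs = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
    return F.pad(t, (*((0, 0) * zero_pairs), 0, pad_length))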
# distributed
def all_gather_variable_batch(t):
device, rank, world_size = t.device, dist.get_rank(), dist.get_world_size()
size = torch.tensor(t.shape[0], device = device, dtype = torch.long)
sizes = [torch.empty_like(size, device = device, dtype = torch.long) for i in range(world_size)]
dist.all_gather(sizes, size)
sizes = torch.stack(sizes)
max_size = sizes.amax().item()
padded_t = pad_dim_to(t, max_size, dim = 0)
gathered_tensors = [torch.empty_like(padded_t, device = device, dtype = padded_t.dtype) for i in range(world_size)]
dist.all_gather(gathered_tensors, padded_t)
gathered_tensor = torch.cat(gathered_tensors)
seq = torch.arange(max_size, device = device)
mask = rearrange(seq, 'j -> 1 j') < rearrange(sizes, 'i -> i 1')
mask = rearrange(mask, 'i j -> (i j)')
gathered_tensor = gathered_tensor[mask]
sizes = sizes.tolist()
return gathered_tensor, sizes
class AllGather(Function):
@staticmethod
def forward(ctx, x):
assert dist.is_initialized() and dist.get_world_size() > 1
x, batch_sizes = all_gather_variable_batch(x)
ctx.batch_sizes = batch_sizes
return x
@staticmethod
def backward(ctx, grads):
batch_sizes, rank = ctx.batch_sizes, dist.get_rank()
grads_by_rank = grads.split(batch_sizes, dim = 0)
return grads_by_rank[rank]
all_gather = AllGather.apply
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
# to latents
class EmbedToLatents(nn.Module):
def __init__(self, dim, dim_latents):
super().__init__()
self.to_latents = nn.Linear(dim, dim_latents, bias=False)
def forward(self, x):
latents = self.to_latents(x)
return F.normalize(latents, dim=-1)
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
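# Small sanity-check sketch for the rotary embedding above (arbitrary shapes, an
# illustration rather than repository code): each position is rotated in 2D
# feature pairs, so the norm of every query / key vector is preserved. Call the
# function to verify.

def _rotary_sanity_check(dim_head = 8, seq_len = 4):
    rotary = RotaryEmbedding(dim_head)
    pos = rotary(seq_len, device = torch.device('cpu'))
    t = torch.randn(2, seq_len, dim_head)
    rotated = apply_rotary_pos_emb(pos, t)
    assert torch.allclose(rotated.norm(dim = -1), t.norm(dim = -1), atol = 1e-5)
    return rotated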
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.mask = None
self.pos_emb = None
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n].to(device)
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.mask = mask
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n].to(device)
pos_emb = self.rotary_emb(n, device=device)
self.pos_emb = pos_emb
return pos_emb
def forward(self, x, attn_mask=None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# extra attention mask - for masking out attention from text CLS token to padding
if exists(attn_mask):
attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
# cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim=None,
dim_head=64,
heads=8,
parallel_ff=False,
ff_mult=4,
norm_context=False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether to have parallel feedforward
ff_inner_dim = ff_mult * dim
self.ff = nn.Sequential(
nn.Linear(dim, ff_inner_dim * 2, bias=False),
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
) if parallel_ff else None
def forward(self, x, context):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
# pre-layernorm, for queries and context
x = self.norm(x)
context = self.context_norm(context)
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# scale
q = q * self.scale
# get key / values
k, v = self.to_kv(context).chunk(2, dim=-1)
# query / key similarity
sim = einsum('b h i d, b j d -> b h i j', q, k)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True)
attn = sim.softmax(dim=-1)
# aggregate
out = einsum('b h i j, b j d -> b h i d', attn, v)
# merge and combine heads
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
# add parallel feedforward (for multimodal layers)
if exists(self.ff):
out = out + self.ff(x)
return out
# transformer
class CoCa(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
unimodal_depth,
multimodal_depth,
dim_latents = None,
image_dim = None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# to latents
dim_latents = default(dim_latents, dim)
self.img_to_latents = EmbedToLatents(dim, dim_latents)
self.text_to_latents = EmbedToLatents(dim, dim_latents)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# unimodal layers
self.unimodal_layers = nn.ModuleList([])
for ind in range(unimodal_depth):
self.unimodal_layers.append(
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
)
# multimodal layers
self.multimodal_layers = nn.ModuleList([])
for ind in range(multimodal_depth):
self.multimodal_layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult))
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
# whether in data parallel setting
self.is_distributed = dist.is_initialized() and dist.get_world_size() > 1
def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through unimodal layers
for attn_ff in self.unimodal_layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
image_tokens = self.img_encoder(images)
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
def forward(
self,
text,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, text_tokens = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through multimodal layers
for attn_ff, cross_attn in self.multimodal_layers:
text_tokens = attn_ff(text_tokens)
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# embedding to latents
text_latents = self.text_to_latents(text_embeds)
image_latents = self.img_to_latents(image_embeds)
# maybe distributed all gather
if self.is_distributed:
latents = torch.stack((text_latents, image_latents), dim = 1)
latents = all_gather(latents)
text_latents, image_latents = latents.unbind(dim = 1)
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_latents, image_latents)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
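# Minimal usage sketch with hypothetical hyperparameters and data: training CoCa
# on token ids plus precomputed image tokens, returning the combined caption +
# contrastive loss. A real image encoder (e.g. a ViT returning patch embeddings)
# would normally be passed in via `img_encoder` instead of precomputing tokens.

if __name__ == '__main__':
    coca = CoCa(
        dim = 512,
        num_tokens = 20000,
        unimodal_depth = 2,
        multimodal_depth = 2,
        image_dim = 512,
        num_img_queries = 64
    )

    text = torch.randint(0, 20000, (2, 128))
    image_tokens = torch.randn(2, 196, 512)    # e.g. 14 x 14 patch embeddings of dim 512

    loss = coca(text, image_tokens = image_tokens, return_loss = True)
    loss.backward()

    # at inference, logits over the vocabulary for each text position
    logits = coca(text, image_tokens = image_tokens)    # (2, 128, 20000)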
| CoCa-pytorch-main | coca_pytorch/coca_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'se3-transformer-pytorch',
packages = find_packages(),
include_package_data = True,
version = '0.9.0',
license='MIT',
description = 'SE3 Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/se3-transformer-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism',
'transformers',
'equivariance',
'SE3'
],
install_requires=[
'einops>=0.3',
'filelock',
'numpy',
'torch>=1.6'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest',
'lie_learn',
'numpy',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| se3-transformer-pytorch-main | setup.py |
import torch
import torch.nn.functional as F
from torch.optim import Adam
from einops import rearrange, repeat
import sidechainnet as scn
from se3_transformer_pytorch.se3_transformer_pytorch import SE3Transformer
torch.set_default_dtype(torch.float64)
BATCH_SIZE = 1
GRADIENT_ACCUMULATE_EVERY = 16
def cycle(loader, len_thres = 500):
while True:
for data in loader:
if data.seqs.shape[1] > len_thres:
continue
yield data
transformer = SE3Transformer(
num_tokens = 24,
dim = 8,
dim_head = 8,
heads = 2,
depth = 2,
attend_self = True,
input_degrees = 1,
output_degrees = 2,
reduce_dim_out = True,
differentiable_coors = True,
num_neighbors = 0,
attend_sparse_neighbors = True,
num_adj_degrees = 2,
adj_dim = 4,
num_degrees=2,
)
data = scn.load(
casp_version = 12,
thinning = 30,
with_pytorch = 'dataloaders',
batch_size = BATCH_SIZE,
dynamic_batching = False
)
# Add gaussian noise to the coords
# Testing the refinement algorithm
dl = cycle(data['train'])
optim = Adam(transformer.parameters(), lr=1e-4)
transformer = transformer.cuda()
for _ in range(10000):
for _ in range(GRADIENT_ACCUMULATE_EVERY):
batch = next(dl)
seqs, coords, masks = batch.seqs, batch.crds, batch.msks
seqs = seqs.cuda().argmax(dim = -1)
coords = coords.cuda().type(torch.float64)
masks = masks.cuda().bool()
l = seqs.shape[1]
coords = rearrange(coords, 'b (l s) c -> b l s c', s = 14)
# Keeping only the backbone coordinates
coords = coords[:, :, 0:3, :]
coords = rearrange(coords, 'b l s c -> b (l s) c')
seq = repeat(seqs, 'b n -> b (n c)', c = 3)
masks = repeat(masks, 'b n -> b (n c)', c = 3)
noised_coords = coords + torch.randn_like(coords).cuda()
i = torch.arange(seq.shape[-1], device = seqs.device)
adj_mat = (i[:, None] >= (i[None, :] - 1)) & (i[:, None] <= (i[None, :] + 1))
out = transformer(
seq,
noised_coords,
mask = masks,
adj_mat = adj_mat,
return_type = 1
)
denoised_coords = noised_coords + out
loss = F.mse_loss(denoised_coords[masks], coords[masks])
(loss / GRADIENT_ACCUMULATE_EVERY).backward()
print('loss:', loss.item())
optim.step()
optim.zero_grad()
| se3-transformer-pytorch-main | denoise.py |
import time
import torch
import numpy as np
from lie_learn.representations.SO3.spherical_harmonics import sh
from se3_transformer_pytorch.spherical_harmonics import get_spherical_harmonics_element
from se3_transformer_pytorch.utils import benchmark
def test_spherical_harmonics():
dtype = torch.float64
theta = 0.1 * torch.randn(32, 1024, 10, dtype=dtype)
phi = 0.1 * torch.randn(32, 1024, 10, dtype=dtype)
s0 = s1 = 0
max_error = -1.
for l in range(8):
for m in range(-l, l + 1):
start = time.time()
diff, y = benchmark(get_spherical_harmonics_element)(l, m, theta, phi)
y = y.type(torch.float32)
s0 += diff
diff, z = benchmark(sh)(l, m, theta, phi)
s1 += diff
error = np.mean(np.abs((y.cpu().numpy() - z) / z))
max_error = max(max_error, error)
print(f"l: {l}, m: {m} ", error)
time_diff_ratio = s0 / s1
assert max_error < 1e-4, 'maximum error must be less than 1e-4'
assert time_diff_ratio < 1., 'spherical harmonics must be faster than the one offered by lie_learn'
print(f"Max error: {max_error}")
print(f"Time diff: {time_diff_ratio}")
| se3-transformer-pytorch-main | tests/test_spherical_harmonics.py |
import torch
from se3_transformer_pytorch.se3_transformer_pytorch import SE3Transformer
from se3_transformer_pytorch.irr_repr import rot
from se3_transformer_pytorch.utils import torch_default_dtype, fourier_encode
def test_transformer():
model = SE3Transformer(
dim = 64,
depth = 1,
num_degrees = 2,
num_neighbors = 4,
valid_radius = 10
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
out = model(feats, coors, mask, return_type = 0)
assert out.shape == (1, 32, 64), 'output must be of the right shape'
def test_causal_se3_transformer():
model = SE3Transformer(
dim = 64,
depth = 1,
num_degrees = 2,
num_neighbors = 4,
valid_radius = 10,
causal = True
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
out = model(feats, coors, mask, return_type = 0)
assert out.shape == (1, 32, 64), 'output must be of the right shape'
def test_se3_transformer_with_global_nodes():
model = SE3Transformer(
dim = 64,
depth = 1,
num_degrees = 2,
num_neighbors = 4,
valid_radius = 10,
global_feats_dim = 16
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
global_feats = torch.randn(1, 2, 16)
out = model(feats, coors, mask, return_type = 0, global_feats = global_feats)
assert out.shape == (1, 32, 64), 'output must be of the right shape'
def test_one_headed_key_values_se3_transformer_with_global_nodes():
model = SE3Transformer(
dim = 64,
depth = 1,
num_degrees = 2,
num_neighbors = 4,
valid_radius = 10,
global_feats_dim = 16,
one_headed_key_values = True
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
global_feats = torch.randn(1, 2, 16)
out = model(feats, coors, mask, return_type = 0, global_feats = global_feats)
assert out.shape == (1, 32, 64), 'output must be of the right shape'
def test_transformer_with_edges():
model = SE3Transformer(
dim = 64,
depth = 1,
num_degrees = 2,
num_neighbors = 4,
edge_dim = 4,
num_edge_tokens = 4
)
feats = torch.randn(1, 32, 64)
edges = torch.randint(0, 4, (1, 32))
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
out = model(feats, coors, mask, edges = edges, return_type = 0)
assert out.shape == (1, 32, 64), 'output must be of the right shape'
def test_transformer_with_continuous_edges():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_degrees = 2,
output_degrees = 2,
edge_dim = 34
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
pairwise_continuous_values = torch.randint(0, 4, (1, 32, 32, 2))
edges = fourier_encode(
pairwise_continuous_values,
num_encodings = 8,
include_self = True
)
out = model(feats, coors, mask, edges = edges, return_type = 1)
assert True
def test_different_input_dimensions_for_types():
model = SE3Transformer(
dim_in = (4, 2),
dim = 4,
depth = 1,
input_degrees = 2,
num_degrees = 2,
output_degrees = 2,
reduce_dim_out = True
)
atom_feats = torch.randn(2, 32, 4, 1)
coors_feats = torch.randn(2, 32, 2, 3)
features = {'0': atom_feats, '1': coors_feats}
coors = torch.randn(2, 32, 3)
mask = torch.ones(2, 32).bool()
refined_coors = coors + model(features, coors, mask, return_type = 1)
assert True
def test_equivariance():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_neighbors = 4,
num_degrees = 2,
output_degrees = 2,
fourier_encode_dist = True
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
R = rot(15, 0, 45)
out1 = model(feats, coors @ R, mask, return_type = 1)
out2 = model(feats, coors, mask, return_type = 1) @ R
diff = (out1 - out2).max()
assert diff < 1e-4, 'is not equivariant'
def test_equivariance_with_egnn_backbone():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_neighbors = 4,
num_degrees = 2,
output_degrees = 2,
fourier_encode_dist = True,
use_egnn = True
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
R = rot(15, 0, 45)
out1 = model(feats, coors @ R, mask, return_type = 1)
out2 = model(feats, coors, mask, return_type = 1) @ R
diff = (out1 - out2).max()
assert diff < 1e-4, 'is not equivariant'
def test_rotary():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_neighbors = 4,
num_degrees = 2,
output_degrees = 2,
fourier_encode_dist = True,
rotary_position = True,
rotary_rel_dist = True
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
R = rot(15, 0, 45)
out1 = model(feats, coors @ R, mask, return_type = 1)
out2 = model(feats, coors, mask, return_type = 1) @ R
diff = (out1 - out2).max()
assert diff < 1e-4, 'is not equivariant'
def test_equivariance_linear_proj_keys():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_neighbors = 4,
num_degrees = 2,
output_degrees = 2,
fourier_encode_dist = True,
linear_proj_keys = True
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
R = rot(15, 0, 45)
out1 = model(feats, coors @ R, mask, return_type = 1)
out2 = model(feats, coors, mask, return_type = 1) @ R
diff = (out1 - out2).max()
assert diff < 1e-4, 'is not equivariant'
@torch_default_dtype(torch.float64)
def test_equivariance_only_sparse_neighbors():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_degrees = 2,
output_degrees = 2,
num_neighbors = 0,
attend_sparse_neighbors = True,
num_adj_degrees = 2,
adj_dim = 4
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
seq = torch.arange(32)
adj_mat = (seq[:, None] >= (seq[None, :] - 1)) & (seq[:, None] <= (seq[None, :] + 1))
R = rot(15, 0, 45)
out1 = model(feats, coors @ R, mask, adj_mat = adj_mat, return_type = 1)
out2 = model(feats, coors, mask, adj_mat = adj_mat, return_type = 1) @ R
diff = (out1 - out2).max()
assert diff < 1e-4, 'is not equivariant'
def test_equivariance_with_reversible_network():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_neighbors = 4,
num_degrees = 2,
output_degrees = 2,
reversible = True
)
feats = torch.randn(1, 32, 64)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
R = rot(15, 0, 45)
out1 = model(feats, coors @ R, mask, return_type = 1)
out2 = model(feats, coors, mask, return_type = 1) @ R
diff = (out1 - out2).max()
assert diff < 1e-4, 'is not equivariant'
def test_equivariance_with_type_one_input():
model = SE3Transformer(
dim = 64,
depth = 1,
attend_self = True,
num_neighbors = 4,
num_degrees = 2,
input_degrees = 2,
output_degrees = 2
)
atom_features = torch.randn(1, 32, 64, 1)
pred_coors = torch.randn(1, 32, 64, 3)
coors = torch.randn(1, 32, 3)
mask = torch.ones(1, 32).bool()
R = rot(15, 0, 45)
out1 = model({'0': atom_features, '1': pred_coors @ R}, coors @ R, mask, return_type = 1)
out2 = model({'0': atom_features, '1': pred_coors}, coors, mask, return_type = 1) @ R
diff = (out1 - out2).max()
assert diff < 1e-4, 'is not equivariant'
| se3-transformer-pytorch-main | tests/test_equivariance.py |
import torch
from se3_transformer_pytorch.spherical_harmonics import clear_spherical_harmonics_cache
from se3_transformer_pytorch.irr_repr import spherical_harmonics, irr_repr, compose
from se3_transformer_pytorch.utils import torch_default_dtype
@torch_default_dtype(torch.float64)
def test_irr_repr():
"""
This test tests that
- irr_repr
- compose
- spherical_harmonics
are compatible
Y(Z(alpha) Y(beta) Z(gamma) x) = D(alpha, beta, gamma) Y(x)
with x = Z(a) Y(b) eta
"""
for order in range(7):
a, b = torch.rand(2)
alpha, beta, gamma = torch.rand(3)
ra, rb, _ = compose(alpha, beta, gamma, a, b, 0)
Yrx = spherical_harmonics(order, ra, rb)
clear_spherical_harmonics_cache()
Y = spherical_harmonics(order, a, b)
clear_spherical_harmonics_cache()
DrY = irr_repr(order, alpha, beta, gamma) @ Y
d, r = (Yrx - DrY).abs().max(), Y.abs().max()
print(d.item(), r.item())
assert d < 1e-10 * r, d / r
| se3-transformer-pytorch-main | tests/test_irrep_repr.py |
import torch
from se3_transformer_pytorch.basis import get_basis, get_R_tensor, basis_transformation_Q_J
from se3_transformer_pytorch.irr_repr import irr_repr
def test_basis():
max_degree = 3
x = torch.randn(2, 1024, 3)
basis = get_basis(x, max_degree)
assert len(basis.keys()) == (max_degree + 1) ** 2, 'correct number of basis kernels'
def test_basis_transformation_Q_J():
rand_angles = torch.rand(4, 3)
J, order_out, order_in = 1, 1, 1
Q_J = basis_transformation_Q_J(J, order_in, order_out).float()
assert all(torch.allclose(get_R_tensor(order_out, order_in, a, b, c) @ Q_J, Q_J @ irr_repr(J, a, b, c)) for a, b, c in rand_angles)
| se3-transformer-pytorch-main | tests/test_basis.py |
from math import pi, sqrt
from functools import reduce
from operator import mul
import torch
from functools import lru_cache
from se3_transformer_pytorch.utils import cache
# constants
CACHE = {}
def clear_spherical_harmonics_cache():
CACHE.clear()
def lpmv_cache_key_fn(l, m, x):
return (l, m)
# spherical harmonics
@lru_cache(maxsize = 1000)
def semifactorial(x):
return reduce(mul, range(x, 1, -2), 1.)
@lru_cache(maxsize = 1000)
def pochhammer(x, k):
return reduce(mul, range(x + 1, x + k), float(x))
def negative_lpmv(l, m, y):
if m < 0:
y *= ((-1) ** m / pochhammer(l + m + 1, -2 * m))
return y
@cache(cache = CACHE, key_fn = lpmv_cache_key_fn)
def lpmv(l, m, x):
"""Associated Legendre function including Condon-Shortley phase.
Args:
m: int order
l: int degree
x: float argument tensor
Returns:
tensor of x-shape
"""
# Check memoized versions
m_abs = abs(m)
if m_abs > l:
return None
if l == 0:
return torch.ones_like(x)
# Check if on boundary else recurse solution down to boundary
if m_abs == l:
# Compute P_m^m
y = (-1)**m_abs * semifactorial(2*m_abs-1)
y *= torch.pow(1-x*x, m_abs/2)
return negative_lpmv(l, m, y)
# Recursively precompute lower degree harmonics
lpmv(l-1, m, x)
# Compute P_{l}^m from recursion in P_{l-1}^m and P_{l-2}^m
# Inplace speedup
y = ((2*l-1) / (l-m_abs)) * x * lpmv(l-1, m_abs, x)
if l - m_abs > 1:
y -= ((l+m_abs-1)/(l-m_abs)) * CACHE[(l-2, m_abs)]
if m < 0:
y = negative_lpmv(l, m, y)
return y
def get_spherical_harmonics_element(l, m, theta, phi):
"""Tesseral spherical harmonic with Condon-Shortley phase.
The Tesseral spherical harmonics are also known as the real spherical
harmonics.
Args:
l: int for degree
m: int for order, where -l <= m < l
theta: collatitude or polar angle
phi: longitude or azimuth
Returns:
tensor of shape theta
"""
m_abs = abs(m)
assert m_abs <= l, "absolute value of order m must be <= degree l"
N = sqrt((2*l + 1) / (4 * pi))
leg = lpmv(l, m_abs, torch.cos(theta))
if m == 0:
return N * leg
if m > 0:
Y = torch.cos(m * phi)
else:
Y = torch.sin(m_abs * phi)
Y *= leg
N *= sqrt(2. / pochhammer(l - m_abs + 1, 2 * m_abs))
Y *= N
return Y
def get_spherical_harmonics(l, theta, phi):
""" Tesseral harmonic with Condon-Shortley phase.
The Tesseral spherical harmonics are also known as the real spherical
harmonics.
Args:
l: int for degree
theta: collatitude or polar angle
phi: longitude or azimuth
Returns:
tensor of shape [*theta.shape, 2*l+1]
"""
return torch.stack([ get_spherical_harmonics_element(l, m, theta, phi) \
for m in range(-l, l+1) ],
dim = -1)
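# Small usage / shape sketch with arbitrary example angles: degree-l real
# spherical harmonics evaluated pointwise give 2l + 1 components per point. Note
# the lpmv cache is keyed only on (l, m), so it should be cleared between calls
# on different inputs, as the SE(3)-Transformer code does via
# clear_spherical_harmonics_cache.

if __name__ == '__main__':
    theta = torch.rand(16, 32) * pi         # polar angle in [0, pi]
    phi = torch.rand(16, 32) * 2 * pi       # azimuthal angle in [0, 2 pi]

    y = get_spherical_harmonics(2, theta, phi)
    assert y.shape == (16, 32, 5)           # 2 * 2 + 1 components for l = 2

    clear_spherical_harmonics_cache()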
| se3-transformer-pytorch-main | se3_transformer_pytorch/spherical_harmonics.py |
import os
from math import pi
import torch
from torch import einsum
from einops import rearrange
from itertools import product
from contextlib import contextmanager
from se3_transformer_pytorch.irr_repr import irr_repr, spherical_harmonics
from se3_transformer_pytorch.utils import torch_default_dtype, cache_dir, exists, default, to_order
from se3_transformer_pytorch.spherical_harmonics import clear_spherical_harmonics_cache
# constants
CACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.equivariant_attention'))
CACHE_PATH = CACHE_PATH if not exists(os.environ.get('CLEAR_CACHE')) else None
# todo (figure out why this was hard coded in the official repo)
RANDOM_ANGLES = [
[4.41301023, 5.56684102, 4.59384642],
[4.93325116, 6.12697327, 4.14574096],
[0.53878964, 4.09050444, 5.36539036],
[2.16017393, 3.48835314, 5.55174441],
[2.52385107, 0.2908958, 3.90040975]
]
# helpers
@contextmanager
def null_context():
yield
# functions
def get_matrix_kernel(A, eps = 1e-10):
'''
Compute an orthonormal basis of the kernel (x_1, x_2, ...)
A x_i = 0
scalar_product(x_i, x_j) = delta_ij
:param A: matrix
:return: matrix where each row is a basis vector of the kernel of A
'''
_u, s, v = torch.svd(A)
kernel = v.t()[s < eps]
return kernel
def get_matrices_kernel(As, eps = 1e-10):
'''
Computes the common kernel of all the As matrices
'''
matrix = torch.cat(As, dim=0)
return get_matrix_kernel(matrix, eps)
def get_spherical_from_cartesian(cartesian, divide_radius_by = 1.0):
"""
# ON ANGLE CONVENTION
#
# sh has following convention for angles:
# :param theta: the colatitude / polar angle, ranging from 0(North Pole, (X, Y, Z) = (0, 0, 1)) to pi(South Pole, (X, Y, Z) = (0, 0, -1)).
# :param phi: the longitude / azimuthal angle, ranging from 0 to 2 pi.
#
# the 3D steerable CNN code therefore (probably) has the following convention for alpha and beta:
# beta = pi - theta; ranging from 0(South Pole, (X, Y, Z) = (0, 0, -1)) to pi(North Pole, (X, Y, Z) = (0, 0, 1)).
# alpha = phi
#
"""
# initialise return array
spherical = torch.zeros_like(cartesian)
# indices for return array
ind_radius, ind_alpha, ind_beta = 0, 1, 2
cartesian_x, cartesian_y, cartesian_z = 2, 0, 1
# get projected radius in xy plane
r_xy = cartesian[..., cartesian_x] ** 2 + cartesian[..., cartesian_y] ** 2
# get second angle
# version 'elevation angle defined from Z-axis down'
spherical[..., ind_beta] = torch.atan2(torch.sqrt(r_xy), cartesian[..., cartesian_z])
# get angle in x-y plane
spherical[...,ind_alpha] = torch.atan2(cartesian[...,cartesian_y], cartesian[...,cartesian_x])
# get overall radius
radius = torch.sqrt(r_xy + cartesian[...,cartesian_z]**2)
if divide_radius_by != 1.0:
radius /= divide_radius_by
spherical[..., ind_radius] = radius
return spherical
def kron(a, b):
"""
A part of the pylabyk library: numpytorch.py at https://github.com/yulkang/pylabyk
Kronecker product of matrices a and b with leading batch dimensions.
Batch dimensions are broadcast, so the batch shapes must be broadcastable against each other.
:type a: torch.Tensor
:type b: torch.Tensor
:rtype: torch.Tensor
"""
res = einsum('... i j, ... k l -> ... i k j l', a, b)
return rearrange(res, '... i j k l -> ... (i j) (k l)')
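# Sanity-check sketch for the batched Kronecker product above: with no batch
# dimensions it should agree with torch.kron on arbitrary small matrices. Call
# the function to verify.

def _kron_sanity_check():
    a, b = torch.randn(2, 3), torch.randn(4, 5)
    assert torch.allclose(kron(a, b), torch.kron(a, b), atol = 1e-6)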
def get_R_tensor(order_out, order_in, a, b, c):
return kron(irr_repr(order_out, a, b, c), irr_repr(order_in, a, b, c))
def sylvester_submatrix(order_out, order_in, J, a, b, c):
''' generate Kronecker product matrix for solving the Sylvester equation in subspace J '''
R_tensor = get_R_tensor(order_out, order_in, a, b, c) # [m_out * m_in, m_out * m_in]
R_irrep_J = irr_repr(J, a, b, c) # [m, m]
R_tensor_identity = torch.eye(R_tensor.shape[0])
R_irrep_J_identity = torch.eye(R_irrep_J.shape[0])
return kron(R_tensor, R_irrep_J_identity) - kron(R_tensor_identity, R_irrep_J.t()) # [(m_out * m_in) * m, (m_out * m_in) * m]
@cache_dir(CACHE_PATH)
@torch_default_dtype(torch.float64)
@torch.no_grad()
def basis_transformation_Q_J(J, order_in, order_out, random_angles = RANDOM_ANGLES):
"""
:param J: order of the spherical harmonics
:param order_in: order of the input representation
:param order_out: order of the output representation
:return: one part of the Q^-1 matrix of the article
"""
sylvester_submatrices = [sylvester_submatrix(order_out, order_in, J, a, b, c) for a, b, c in random_angles]
null_space = get_matrices_kernel(sylvester_submatrices)
assert null_space.size(0) == 1, null_space.size() # unique subspace solution
Q_J = null_space[0] # [(m_out * m_in) * m]
Q_J = Q_J.view(to_order(order_out) * to_order(order_in), to_order(J)) # [m_out * m_in, m]
return Q_J.float() # [m_out * m_in, m]
def precompute_sh(r_ij, max_J):
"""
pre-compute spherical harmonics up to order max_J
:param r_ij: relative positions
:param max_J: maximum order used in entire network
:return: dict where each entry has shape [B,N,K,2J+1]
"""
i_alpha, i_beta = 1, 2
Y_Js = {J: spherical_harmonics(J, r_ij[...,i_alpha], r_ij[...,i_beta]) for J in range(max_J + 1)}
clear_spherical_harmonics_cache()
return Y_Js
def get_basis(r_ij, max_degree, differentiable = False):
"""Return equivariant weight basis (basis)
Call this function *once* at the start of each forward pass of the model.
It computes the equivariant weight basis, W_J^lk(x), and internodal
distances, needed to compute varphi_J^lk(x), of eqn 8 of
https://arxiv.org/pdf/2006.10503.pdf. The return values of this function
can be shared as input across all SE(3)-Transformer layers in a model.
Args:
r_ij: relative positional vectors
max_degree: non-negative int for degree of highest feature-type
differentiable: whether r_ij should receive gradients from basis
Returns:
dict of equivariant bases, with keys of the form '<d_in>,<d_out>'
"""
# Relative positional encodings (vector)
context = null_context if not differentiable else torch.no_grad
device, dtype = r_ij.device, r_ij.dtype
with context():
r_ij = get_spherical_from_cartesian(r_ij)
# Spherical harmonic basis
Y = precompute_sh(r_ij, 2 * max_degree)
# Equivariant basis (dict['<d_in>,<d_out>'])
basis = {}
for d_in, d_out in product(range(max_degree+1), range(max_degree+1)):
K_Js = []
for J in range(abs(d_in - d_out), d_in + d_out + 1):
# Get spherical harmonic projection matrices
Q_J = basis_transformation_Q_J(J, d_in, d_out)
Q_J = Q_J.type(dtype).to(device)
# Create kernel from spherical harmonics
K_J = torch.matmul(Y[J], Q_J.T)
K_Js.append(K_J)
# Reshape so can take linear combinations with a dot product
K_Js = torch.stack(K_Js, dim = -1)
size = (*r_ij.shape[:-1], 1, to_order(d_out), 1, to_order(d_in), to_order(min(d_in,d_out)))
basis[f'{d_in},{d_out}'] = K_Js.view(*size)
# extra detach for safe measure
if not differentiable:
for k, v in basis.items():
basis[k] = v.detach()
return basis
| se3-transformer-pytorch-main | se3_transformer_pytorch/basis.py |
from math import sqrt
from itertools import product
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn, einsum
from se3_transformer_pytorch.basis import get_basis
from se3_transformer_pytorch.utils import exists, default, uniq, map_values, batched_index_select, masked_mean, to_order, fourier_encode, cast_tuple, safe_cat, fast_split, rand_uniform, broadcat
from se3_transformer_pytorch.reversible import ReversibleSequence, SequentialSequence
from se3_transformer_pytorch.rotary import SinusoidalEmbeddings, apply_rotary_pos_emb
from einops import rearrange, repeat
# fiber helpers
FiberEl = namedtuple('FiberEl', ['degrees', 'dim'])
class Fiber(nn.Module):
def __init__(
self,
structure
):
super().__init__()
if isinstance(structure, dict):
structure = [FiberEl(degree, dim) for degree, dim in structure.items()]
self.structure = structure
@property
def dims(self):
return uniq(map(lambda t: t[1], self.structure))
@property
def degrees(self):
return map(lambda t: t[0], self.structure)
@staticmethod
def create(num_degrees, dim):
dim_tuple = dim if isinstance(dim, tuple) else ((dim,) * num_degrees)
return Fiber([FiberEl(degree, dim) for degree, dim in zip(range(num_degrees), dim_tuple)])
def __getitem__(self, degree):
return dict(self.structure)[degree]
def __iter__(self):
return iter(self.structure)
def __mul__(self, fiber):
return product(self.structure, fiber.structure)
def __and__(self, fiber):
out = []
degrees_out = fiber.degrees
for degree, dim in self:
if degree in fiber.degrees:
dim_out = fiber[degree]
out.append((degree, dim, dim_out))
return out
def get_tensor_device_and_dtype(features):
first_tensor = next(iter(features.items()))[1]
return first_tensor.device, first_tensor.dtype
# classes
class ResidualSE3(nn.Module):
""" only support instance where both Fibers are identical """
def forward(self, x, res):
out = {}
for degree, tensor in x.items():
degree = str(degree)
out[degree] = tensor
if degree in res:
out[degree] = out[degree] + res[degree]
return out
class LinearSE3(nn.Module):
def __init__(
self,
fiber_in,
fiber_out
):
super().__init__()
self.weights = nn.ParameterDict()
for (degree, dim_in, dim_out) in (fiber_in & fiber_out):
key = str(degree)
self.weights[key] = nn.Parameter(torch.randn(dim_in, dim_out) / sqrt(dim_in))
def forward(self, x):
out = {}
for degree, weight in self.weights.items():
out[degree] = einsum('b n d m, d e -> b n e m', x[degree], weight)
return out
class NormSE3(nn.Module):
"""Norm-based SE(3)-equivariant nonlinearity.
Nonlinearities are important in SE(3) equivariant GCNs. They are also quite
expensive to compute, so it is convenient for them to share resources with
other layers, such as normalization. The general workflow is as follows:
> for feature type in features:
> norm, phase <- feature
> output = fnc(norm) * phase
where fnc: {R+}^m -> R^m is a learnable map from m norms to m scalars.
"""
def __init__(
self,
fiber,
nonlin = nn.GELU(),
gated_scale = False,
eps = 1e-12,
):
super().__init__()
self.fiber = fiber
self.nonlin = nonlin
self.eps = eps
# Norm mappings: 1 per feature type
self.transform = nn.ModuleDict()
for degree, chan in fiber:
self.transform[str(degree)] = nn.ParameterDict({
'scale': nn.Parameter(torch.ones(1, 1, chan)) if not gated_scale else None,
'w_gate': nn.Parameter(rand_uniform((chan, chan), -1e-3, 1e-3)) if gated_scale else None
})
def forward(self, features):
output = {}
for degree, t in features.items():
# Compute the norms and normalized features
norm = t.norm(dim = -1, keepdim = True).clamp(min = self.eps)
phase = t / norm
# Transform on norms
parameters = self.transform[degree]
gate_weights, scale = parameters['w_gate'], parameters['scale']
transformed = rearrange(norm, '... () -> ...')
if not exists(scale):
scale = einsum('b n d, d e -> b n e', transformed, gate_weights)
transformed = self.nonlin(transformed * scale)
transformed = rearrange(transformed, '... -> ... ()')
# Nonlinearity on norm
output[degree] = (transformed * phase).view(*t.shape)
return output
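# Tiny usage sketch of the norm-based nonlinearity above (arbitrary shapes, an
# illustration rather than repository code): every feature tensor keeps its
# shape - only the per-channel norms pass through the learned scale and
# nonlinearity, while the directions (phases) are preserved.

def _norm_se3_example():
    fiber = Fiber.create(2, 8)    # degrees 0 and 1, with 8 channels each
    norm = NormSE3(fiber)
    feats = {'0': torch.randn(1, 4, 8, 1), '1': torch.randn(1, 4, 8, 3)}
    out = norm(feats)
    assert out['0'].shape == (1, 4, 8, 1) and out['1'].shape == (1, 4, 8, 3)
    return out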
class ConvSE3(nn.Module):
"""A tensor field network layer
ConvSE3 stands for a Convolution SE(3)-equivariant layer. It is the
equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
conv layer in a GCN.
At each node, the activations are split into different "feature types",
indexed by the SE(3) representation type: non-negative integers 0, 1, 2, ..
"""
def __init__(
self,
fiber_in,
fiber_out,
self_interaction = True,
pool = True,
edge_dim = 0,
fourier_encode_dist = False,
num_fourier_features = 4,
splits = 4
):
super().__init__()
self.fiber_in = fiber_in
self.fiber_out = fiber_out
self.edge_dim = edge_dim
self.self_interaction = self_interaction
self.num_fourier_features = num_fourier_features
self.fourier_encode_dist = fourier_encode_dist
# radial function will assume a dimension of at minimum 1, for the relative distance - extra fourier features must be added to the edge dimension
edge_dim += (0 if not fourier_encode_dist else (num_fourier_features * 2))
# Neighbor -> center weights
self.kernel_unary = nn.ModuleDict()
self.splits = splits # for splitting the computation of kernel and basis, to reduce peak memory usage
for (di, mi), (do, mo) in (self.fiber_in * self.fiber_out):
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim = edge_dim, splits = splits)
self.pool = pool
# Center -> center weights
if self_interaction:
assert self.pool, 'must pool edges if followed with self interaction'
self.self_interact = LinearSE3(fiber_in, fiber_out)
self.self_interact_sum = ResidualSE3()
def forward(
self,
inp,
edge_info,
rel_dist = None,
basis = None
):
splits = self.splits
neighbor_indices, neighbor_masks, edges = edge_info
rel_dist = rearrange(rel_dist, 'b m n -> b m n ()')
kernels = {}
outputs = {}
if self.fourier_encode_dist:
rel_dist = fourier_encode(rel_dist[..., None], num_encodings = self.num_fourier_features)
# split basis
basis_keys = basis.keys()
split_basis_values = list(zip(*list(map(lambda t: fast_split(t, splits, dim = 1), basis.values()))))
split_basis = list(map(lambda v: dict(zip(basis_keys, v)), split_basis_values))
# go through every permutation of input degree type to output degree type
for degree_out in self.fiber_out.degrees:
output = 0
degree_out_key = str(degree_out)
for degree_in, m_in in self.fiber_in:
etype = f'({degree_in},{degree_out})'
x = inp[str(degree_in)]
x = batched_index_select(x, neighbor_indices, dim = 1)
x = x.view(*x.shape[:3], to_order(degree_in) * m_in, 1)
kernel_fn = self.kernel_unary[etype]
edge_features = torch.cat((rel_dist, edges), dim = -1) if exists(edges) else rel_dist
output_chunk = None
split_x = fast_split(x, splits, dim = 1)
split_edge_features = fast_split(edge_features, splits, dim = 1)
# process input, edges, and basis in chunks along the sequence dimension
for x_chunk, edge_features, basis in zip(split_x, split_edge_features, split_basis):
kernel = kernel_fn(edge_features, basis = basis)
chunk = einsum('... o i, ... i c -> ... o c', kernel, x_chunk)
output_chunk = safe_cat(output_chunk, chunk, dim = 1)
output = output + output_chunk
if self.pool:
output = masked_mean(output, neighbor_masks, dim = 2) if exists(neighbor_masks) else output.mean(dim = 2)
leading_shape = x.shape[:2] if self.pool else x.shape[:3]
output = output.view(*leading_shape, -1, to_order(degree_out))
outputs[degree_out_key] = output
if self.self_interaction:
self_interact_out = self.self_interact(inp)
outputs = self.self_interact_sum(outputs, self_interact_out)
return outputs
class RadialFunc(nn.Module):
"""NN parameterized radial profile function."""
def __init__(
self,
num_freq,
in_dim,
out_dim,
edge_dim = None,
mid_dim = 128
):
super().__init__()
self.num_freq = num_freq
self.in_dim = in_dim
self.mid_dim = mid_dim
self.out_dim = out_dim
self.edge_dim = default(edge_dim, 0)
self.net = nn.Sequential(
nn.Linear(self.edge_dim + 1, mid_dim),
nn.LayerNorm(mid_dim),
nn.GELU(),
nn.Linear(mid_dim, mid_dim),
nn.LayerNorm(mid_dim),
nn.GELU(),
nn.Linear(mid_dim, num_freq * in_dim * out_dim)
)
def forward(self, x):
y = self.net(x)
return rearrange(y, '... (o i f) -> ... o () i () f', i = self.in_dim, o = self.out_dim)
class PairwiseConv(nn.Module):
"""SE(3)-equivariant convolution between two single-type features"""
def __init__(
self,
degree_in,
nc_in,
degree_out,
nc_out,
edge_dim = 0,
splits = 4
):
super().__init__()
self.degree_in = degree_in
self.degree_out = degree_out
self.nc_in = nc_in
self.nc_out = nc_out
self.num_freq = to_order(min(degree_in, degree_out))
self.d_out = to_order(degree_out)
self.edge_dim = edge_dim
self.rp = RadialFunc(self.num_freq, nc_in, nc_out, edge_dim)
self.splits = splits
def forward(self, feat, basis):
splits = self.splits
R = self.rp(feat)
B = basis[f'{self.degree_in},{self.degree_out}']
out_shape = (*R.shape[:3], self.d_out * self.nc_out, -1)
# torch.sum(R * B, dim = -1) is too memory intensive
# needs to be chunked to reduce peak memory usage
out = 0
for i in range(R.shape[-1]):
out += R[..., i] * B[..., i]
out = rearrange(out, 'b n h s ... -> (b n h s) ...')
# reshape and return
return out.view(*out_shape)
# feed forwards
class FeedForwardSE3(nn.Module):
def __init__(
self,
fiber,
mult = 4
):
super().__init__()
self.fiber = fiber
fiber_hidden = Fiber(list(map(lambda t: (t[0], t[1] * mult), fiber)))
self.project_in = LinearSE3(fiber, fiber_hidden)
self.nonlin = NormSE3(fiber_hidden)
self.project_out = LinearSE3(fiber_hidden, fiber)
def forward(self, features):
outputs = self.project_in(features)
outputs = self.nonlin(outputs)
outputs = self.project_out(outputs)
return outputs
class FeedForwardBlockSE3(nn.Module):
def __init__(
self,
fiber,
norm_gated_scale = False
):
super().__init__()
self.fiber = fiber
self.prenorm = NormSE3(fiber, gated_scale = norm_gated_scale)
self.feedforward = FeedForwardSE3(fiber)
self.residual = ResidualSE3()
def forward(self, features):
res = features
out = self.prenorm(features)
out = self.feedforward(out)
return self.residual(out, res)
# attention
class AttentionSE3(nn.Module):
def __init__(
self,
fiber,
dim_head = 64,
heads = 8,
attend_self = False,
edge_dim = None,
fourier_encode_dist = False,
rel_dist_num_fourier_features = 4,
use_null_kv = False,
splits = 4,
global_feats_dim = None,
linear_proj_keys = False,
tie_key_values = False
):
super().__init__()
hidden_dim = dim_head * heads
hidden_fiber = Fiber(list(map(lambda t: (t[0], hidden_dim), fiber)))
project_out = not (heads == 1 and len(fiber.dims) == 1 and dim_head == fiber.dims[0])
self.scale = dim_head ** -0.5
self.heads = heads
self.linear_proj_keys = linear_proj_keys # whether to linearly project features for keys, rather than convolve with basis
self.to_q = LinearSE3(fiber, hidden_fiber)
self.to_v = ConvSE3(fiber, hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist, num_fourier_features = rel_dist_num_fourier_features, splits = splits)
assert not (linear_proj_keys and tie_key_values), 'you cannot do linear projection of keys and have shared key / values turned on at the same time'
if linear_proj_keys:
self.to_k = LinearSE3(fiber, hidden_fiber)
elif not tie_key_values:
self.to_k = ConvSE3(fiber, hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist, num_fourier_features = rel_dist_num_fourier_features, splits = splits)
else:
self.to_k = None
self.to_out = LinearSE3(hidden_fiber, fiber) if project_out else nn.Identity()
self.use_null_kv = use_null_kv
if use_null_kv:
self.null_keys = nn.ParameterDict()
self.null_values = nn.ParameterDict()
for degree in fiber.degrees:
m = to_order(degree)
degree_key = str(degree)
self.null_keys[degree_key] = nn.Parameter(torch.zeros(heads, dim_head, m))
self.null_values[degree_key] = nn.Parameter(torch.zeros(heads, dim_head, m))
self.attend_self = attend_self
if attend_self:
self.to_self_k = LinearSE3(fiber, hidden_fiber)
self.to_self_v = LinearSE3(fiber, hidden_fiber)
self.accept_global_feats = exists(global_feats_dim)
if self.accept_global_feats:
global_input_fiber = Fiber.create(1, global_feats_dim)
global_output_fiber = Fiber.create(1, hidden_fiber[0])
self.to_global_k = LinearSE3(global_input_fiber, global_output_fiber)
self.to_global_v = LinearSE3(global_input_fiber, global_output_fiber)
def forward(self, features, edge_info, rel_dist, basis, global_feats = None, pos_emb = None, mask = None):
h, attend_self = self.heads, self.attend_self
device, dtype = get_tensor_device_and_dtype(features)
neighbor_indices, neighbor_mask, edges = edge_info
if exists(neighbor_mask):
neighbor_mask = rearrange(neighbor_mask, 'b i j -> b () i j')
queries = self.to_q(features)
values = self.to_v(features, edge_info, rel_dist, basis)
if self.linear_proj_keys:
keys = self.to_k(features)
keys = map_values(lambda val: batched_index_select(val, neighbor_indices, dim = 1), keys)
elif not exists(self.to_k):
keys = values
else:
keys = self.to_k(features, edge_info, rel_dist, basis)
if attend_self:
self_keys, self_values = self.to_self_k(features), self.to_self_v(features)
if exists(global_feats):
global_keys, global_values = self.to_global_k(global_feats), self.to_global_v(global_feats)
outputs = {}
for degree in features.keys():
q, k, v = map(lambda t: t[degree], (queries, keys, values))
q = rearrange(q, 'b i (h d) m -> b h i d m', h = h)
k, v = map(lambda t: rearrange(t, 'b i j (h d) m -> b h i j d m', h = h), (k, v))
if attend_self:
self_k, self_v = map(lambda t: t[degree], (self_keys, self_values))
self_k, self_v = map(lambda t: rearrange(t, 'b n (h d) m -> b h n () d m', h = h), (self_k, self_v))
k = torch.cat((self_k, k), dim = 3)
v = torch.cat((self_v, v), dim = 3)
if exists(pos_emb) and degree == '0':
query_pos_emb, key_pos_emb = pos_emb
query_pos_emb = rearrange(query_pos_emb, 'b i d -> b () i d ()')
key_pos_emb = rearrange(key_pos_emb, 'b i j d -> b () i j d ()')
q = apply_rotary_pos_emb(q, query_pos_emb)
k = apply_rotary_pos_emb(k, key_pos_emb)
v = apply_rotary_pos_emb(v, key_pos_emb)
if self.use_null_kv:
null_k, null_v = map(lambda t: t[degree], (self.null_keys, self.null_values))
null_k, null_v = map(lambda t: repeat(t, 'h d m -> b h i () d m', b = q.shape[0], i = q.shape[2]), (null_k, null_v))
k = torch.cat((null_k, k), dim = 3)
v = torch.cat((null_v, v), dim = 3)
if exists(global_feats) and degree == '0':
global_k, global_v = map(lambda t: t[degree], (global_keys, global_values))
global_k, global_v = map(lambda t: repeat(t, 'b j (h d) m -> b h i j d m', h = h, i = k.shape[2]), (global_k, global_v))
k = torch.cat((global_k, k), dim = 3)
v = torch.cat((global_v, v), dim = 3)
sim = einsum('b h i d m, b h i j d m -> b h i j', q, k) * self.scale
if exists(neighbor_mask):
num_left_pad = sim.shape[-1] - neighbor_mask.shape[-1]
mask = F.pad(neighbor_mask, (num_left_pad, 0), value = True)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h i j d m -> b h i d m', attn, v)
outputs[degree] = rearrange(out, 'b h n d m -> b n (h d) m')
return self.to_out(outputs)
# AttentionSE3, but with one key / value projection shared across all query heads
class OneHeadedKVAttentionSE3(nn.Module):
def __init__(
self,
fiber,
dim_head = 64,
heads = 8,
attend_self = False,
edge_dim = None,
fourier_encode_dist = False,
rel_dist_num_fourier_features = 4,
use_null_kv = False,
splits = 4,
global_feats_dim = None,
linear_proj_keys = False,
tie_key_values = False
):
super().__init__()
hidden_dim = dim_head * heads
hidden_fiber = Fiber(list(map(lambda t: (t[0], hidden_dim), fiber)))
kv_hidden_fiber = Fiber(list(map(lambda t: (t[0], dim_head), fiber)))
project_out = not (heads == 1 and len(fiber.dims) == 1 and dim_head == fiber.dims[0])
self.scale = dim_head ** -0.5
self.heads = heads
self.linear_proj_keys = linear_proj_keys # whether to linearly project features for keys, rather than convolve with basis
self.to_q = LinearSE3(fiber, hidden_fiber)
self.to_v = ConvSE3(fiber, kv_hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist, num_fourier_features = rel_dist_num_fourier_features, splits = splits)
assert not (linear_proj_keys and tie_key_values), 'you cannot do linear projection of keys and have shared key / values turned on at the same time'
if linear_proj_keys:
self.to_k = LinearSE3(fiber, kv_hidden_fiber)
elif not tie_key_values:
self.to_k = ConvSE3(fiber, kv_hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist, num_fourier_features = rel_dist_num_fourier_features, splits = splits)
else:
self.to_k = None
self.to_out = LinearSE3(hidden_fiber, fiber) if project_out else nn.Identity()
self.use_null_kv = use_null_kv
if use_null_kv:
self.null_keys = nn.ParameterDict()
self.null_values = nn.ParameterDict()
for degree in fiber.degrees:
m = to_order(degree)
degree_key = str(degree)
self.null_keys[degree_key] = nn.Parameter(torch.zeros(dim_head, m))
self.null_values[degree_key] = nn.Parameter(torch.zeros(dim_head, m))
self.attend_self = attend_self
if attend_self:
self.to_self_k = LinearSE3(fiber, kv_hidden_fiber)
self.to_self_v = LinearSE3(fiber, kv_hidden_fiber)
self.accept_global_feats = exists(global_feats_dim)
if self.accept_global_feats:
global_input_fiber = Fiber.create(1, global_feats_dim)
global_output_fiber = Fiber.create(1, kv_hidden_fiber[0])
self.to_global_k = LinearSE3(global_input_fiber, global_output_fiber)
self.to_global_v = LinearSE3(global_input_fiber, global_output_fiber)
def forward(self, features, edge_info, rel_dist, basis, global_feats = None, pos_emb = None, mask = None):
h, attend_self = self.heads, self.attend_self
device, dtype = get_tensor_device_and_dtype(features)
neighbor_indices, neighbor_mask, edges = edge_info
if exists(neighbor_mask):
neighbor_mask = rearrange(neighbor_mask, 'b i j -> b () i j')
queries = self.to_q(features)
values = self.to_v(features, edge_info, rel_dist, basis)
if self.linear_proj_keys:
keys = self.to_k(features)
keys = map_values(lambda val: batched_index_select(val, neighbor_indices, dim = 1), keys)
elif not exists(self.to_k):
keys = values
else:
keys = self.to_k(features, edge_info, rel_dist, basis)
if attend_self:
self_keys, self_values = self.to_self_k(features), self.to_self_v(features)
if exists(global_feats):
global_keys, global_values = self.to_global_k(global_feats), self.to_global_v(global_feats)
outputs = {}
for degree in features.keys():
q, k, v = map(lambda t: t[degree], (queries, keys, values))
q = rearrange(q, 'b i (h d) m -> b h i d m', h = h)
if attend_self:
self_k, self_v = map(lambda t: t[degree], (self_keys, self_values))
self_k, self_v = map(lambda t: rearrange(t, 'b n d m -> b n () d m'), (self_k, self_v))
k = torch.cat((self_k, k), dim = 2)
v = torch.cat((self_v, v), dim = 2)
if exists(pos_emb) and degree == '0':
query_pos_emb, key_pos_emb = pos_emb
query_pos_emb = rearrange(query_pos_emb, 'b i d -> b () i d ()')
key_pos_emb = rearrange(key_pos_emb, 'b i j d -> b i j d ()')
q = apply_rotary_pos_emb(q, query_pos_emb)
k = apply_rotary_pos_emb(k, key_pos_emb)
v = apply_rotary_pos_emb(v, key_pos_emb)
if self.use_null_kv:
null_k, null_v = map(lambda t: t[degree], (self.null_keys, self.null_values))
null_k, null_v = map(lambda t: repeat(t, 'd m -> b i () d m', b = q.shape[0], i = q.shape[2]), (null_k, null_v))
k = torch.cat((null_k, k), dim = 2)
v = torch.cat((null_v, v), dim = 2)
if exists(global_feats) and degree == '0':
global_k, global_v = map(lambda t: t[degree], (global_keys, global_values))
global_k, global_v = map(lambda t: repeat(t, 'b j d m -> b i j d m', i = k.shape[1]), (global_k, global_v))
k = torch.cat((global_k, k), dim = 2)
v = torch.cat((global_v, v), dim = 2)
sim = einsum('b h i d m, b i j d m -> b h i j', q, k) * self.scale
if exists(neighbor_mask):
num_left_pad = sim.shape[-1] - neighbor_mask.shape[-1]
mask = F.pad(neighbor_mask, (num_left_pad, 0), value = True)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b i j d m -> b h i d m', attn, v)
outputs[degree] = rearrange(out, 'b h n d m -> b n (h d) m')
return self.to_out(outputs)
class AttentionBlockSE3(nn.Module):
def __init__(
self,
fiber,
dim_head = 24,
heads = 8,
attend_self = False,
edge_dim = None,
use_null_kv = False,
fourier_encode_dist = False,
rel_dist_num_fourier_features = 4,
splits = 4,
global_feats_dim = False,
linear_proj_keys = False,
tie_key_values = False,
attention_klass = AttentionSE3,
norm_gated_scale = False
):
super().__init__()
self.attn = attention_klass(fiber, heads = heads, dim_head = dim_head, attend_self = attend_self, edge_dim = edge_dim, use_null_kv = use_null_kv, rel_dist_num_fourier_features = rel_dist_num_fourier_features, fourier_encode_dist =fourier_encode_dist, splits = splits, global_feats_dim = global_feats_dim, linear_proj_keys = linear_proj_keys, tie_key_values = tie_key_values)
self.prenorm = NormSE3(fiber, gated_scale = norm_gated_scale)
self.residual = ResidualSE3()
def forward(self, features, edge_info, rel_dist, basis, global_feats = None, pos_emb = None, mask = None):
res = features
outputs = self.prenorm(features)
outputs = self.attn(outputs, edge_info, rel_dist, basis, global_feats, pos_emb, mask)
return self.residual(outputs, res)
# egnn
class Swish_(nn.Module):
def forward(self, x):
return x * x.sigmoid()
SiLU = nn.SiLU if hasattr(nn, 'SiLU') else Swish_
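# norm for higher-degree (type > 0) features: normalizes each vector to unit length, then rescales by a learned affine transform of its original norm, preserving direction (and thus equivariance)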
class HtypesNorm(nn.Module):
def __init__(self, dim, eps = 1e-8, scale_init = 1e-2, bias_init = 1e-2):
super().__init__()
self.eps = eps
scale = torch.empty(1, 1, 1, dim, 1).fill_(scale_init)
bias = torch.empty(1, 1, 1, dim, 1).fill_(bias_init)
self.scale = nn.Parameter(scale)
self.bias = nn.Parameter(bias)
def forward(self, coors):
norm = coors.norm(dim = -1, keepdim = True)
normed_coors = coors / norm.clamp(min = self.eps)
return normed_coors * (norm * self.scale + self.bias)
class EGNN(nn.Module):
def __init__(
self,
fiber,
hidden_dim = 32,
edge_dim = 0,
init_eps = 1e-3,
coor_weights_clamp_value = None
):
super().__init__()
self.fiber = fiber
node_dim = fiber[0]
htypes = list(filter(lambda t: t.degrees != 0, fiber))
num_htypes = len(htypes)
htype_dims = sum([fiberel.dim for fiberel in htypes])
edge_input_dim = node_dim * 2 + htype_dims + edge_dim + 1
self.node_norm = nn.LayerNorm(node_dim)
self.edge_mlp = nn.Sequential(
nn.Linear(edge_input_dim, edge_input_dim * 2),
SiLU(),
nn.Linear(edge_input_dim * 2, hidden_dim),
SiLU()
)
self.htype_norms = nn.ModuleDict({})
self.htype_gating = nn.ModuleDict({})
for degree, dim in fiber:
if degree == 0:
continue
self.htype_norms[str(degree)] = HtypesNorm(dim)
self.htype_gating[str(degree)] = nn.Linear(node_dim, dim)
self.htypes_mlp = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim * 4),
SiLU(),
nn.Linear(hidden_dim * 4, htype_dims)
)
self.node_mlp = nn.Sequential(
nn.Linear(node_dim + hidden_dim, node_dim * 2),
SiLU(),
nn.Linear(node_dim * 2, node_dim)
)
self.coor_weights_clamp_value = coor_weights_clamp_value
self.init_eps = init_eps
self.apply(self.init_)
def init_(self, module):
if type(module) in {nn.Linear}:
nn.init.normal_(module.weight, std = self.init_eps)
def forward(
self,
features,
edge_info,
rel_dist,
mask = None,
**kwargs
):
neighbor_indices, neighbor_masks, edges = edge_info
mask = neighbor_masks
# type 0 features
nodes = features['0']
nodes = rearrange(nodes, '... () -> ...')
# higher types (htype)
htypes = list(filter(lambda t: t[0] != '0', features.items()))
htype_degrees = list(map(lambda t: t[0], htypes))
htype_dims = list(map(lambda t: t[1].shape[-2], htypes))
# prepare higher types
rel_htypes = []
rel_htypes_dists = []
for degree, htype in htypes:
rel_htype = rearrange(htype, 'b i d m -> b i () d m') - rearrange(htype, 'b j d m -> b () j d m')
rel_htype_dist = rel_htype.norm(dim = -1)
rel_htypes.append(rel_htype)
rel_htypes_dists.append(rel_htype_dist)
# prepare edges for edge MLP
nodes_i = rearrange(nodes, 'b i d -> b i () d')
nodes_j = batched_index_select(nodes, neighbor_indices, dim = 1)
neighbor_higher_type_dists = map(lambda t: batched_index_select(t, neighbor_indices, dim = 2), rel_htypes_dists)
coor_rel_dist = rearrange(rel_dist, 'b i j -> b i j ()')
edge_mlp_inputs = broadcat((nodes_i, nodes_j, *neighbor_higher_type_dists, coor_rel_dist), dim = -1)
if exists(edges):
edge_mlp_inputs = torch.cat((edge_mlp_inputs, edges), dim = -1)
# get intermediate representation
m_ij = self.edge_mlp(edge_mlp_inputs)
# to coordinates
htype_weights = self.htypes_mlp(m_ij)
if exists(self.coor_weights_clamp_value):
clamp_value = self.coor_weights_clamp_value
htype_weights.clamp_(min = -clamp_value, max = clamp_value)
split_htype_weights = htype_weights.split(htype_dims, dim = -1)
htype_updates = []
if exists(mask):
htype_mask = rearrange(mask, 'b i j -> b i j ()')
htype_weights = htype_weights.masked_fill(~htype_mask, 0.)
for degree, rel_htype, htype_weight in zip(htype_degrees, rel_htypes, split_htype_weights):
normed_rel_htype = self.htype_norms[str(degree)](rel_htype)
normed_rel_htype = batched_index_select(normed_rel_htype, neighbor_indices, dim = 2)
htype_update = einsum('b i j d m, b i j d -> b i d m', normed_rel_htype, htype_weight)
htype_updates.append(htype_update)
# to nodes
if exists(mask):
m_ij_mask = rearrange(mask, '... -> ... ()')
m_ij = m_ij.masked_fill(~m_ij_mask, 0.)
m_i = m_ij.sum(dim = -2)
normed_nodes = self.node_norm(nodes)
node_mlp_input = torch.cat((normed_nodes, m_i), dim = -1)
node_out = self.node_mlp(node_mlp_input) + nodes
# update nodes
features['0'] = rearrange(node_out, '... -> ... ()')
# update higher types
update_htype_dicts = dict(zip(htype_degrees, htype_updates))
for degree, update_htype in update_htype_dicts.items():
features[degree] = features[degree] + update_htype
for degree in htype_degrees:
gating = self.htype_gating[str(degree)](node_out).sigmoid()
features[degree] = features[degree] * rearrange(gating, '... -> ... ()')
return features
class EGnnNetwork(nn.Module):
def __init__(
self,
*,
fiber,
depth,
edge_dim = 0,
hidden_dim = 32,
coor_weights_clamp_value = None,
feedforward = False
):
super().__init__()
self.fiber = fiber
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
EGNN(fiber = fiber, edge_dim = edge_dim, hidden_dim = hidden_dim, coor_weights_clamp_value = coor_weights_clamp_value),
FeedForwardBlockSE3(fiber) if feedforward else None
]))
def forward(
self,
features,
edge_info,
rel_dist,
basis,
global_feats = None,
pos_emb = None,
mask = None,
**kwargs
):
neighbor_indices, neighbor_masks, edges = edge_info
device = neighbor_indices.device
# modify neighbors to include self (since se3 transformer depends on removing attention to token self, but this does not apply for EGNN)
self_indices = torch.arange(neighbor_indices.shape[1], device = device)
self_indices = rearrange(self_indices, 'i -> () i ()')
neighbor_indices = broadcat((self_indices, neighbor_indices), dim = -1)
neighbor_masks = F.pad(neighbor_masks, (1, 0), value = True)
rel_dist = F.pad(rel_dist, (1, 0), value = 0.)
if exists(edges):
edges = F.pad(edges, (0, 0, 1, 0), value = 0.) # make edge of token to itself 0 for now
edge_info = (neighbor_indices, neighbor_masks, edges)
# go through layers
for egnn, ff in self.layers:
features = egnn(
features,
edge_info = edge_info,
rel_dist = rel_dist,
basis = basis,
global_feats = global_feats,
pos_emb = pos_emb,
mask = mask,
**kwargs
)
if exists(ff):
features = ff(features)
return features
# main class
class SE3Transformer(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 24,
depth = 2,
input_degrees = 1,
num_degrees = None,
output_degrees = 1,
valid_radius = 1e5,
reduce_dim_out = False,
num_tokens = None,
num_positions = None,
num_edge_tokens = None,
edge_dim = None,
reversible = False,
attend_self = True,
use_null_kv = False,
differentiable_coors = False,
fourier_encode_dist = False,
rel_dist_num_fourier_features = 4,
num_neighbors = float('inf'),
attend_sparse_neighbors = False,
num_adj_degrees = None,
adj_dim = 0,
max_sparse_neighbors = float('inf'),
dim_in = None,
dim_out = None,
norm_out = False,
num_conv_layers = 0,
causal = False,
splits = 4,
global_feats_dim = None,
linear_proj_keys = False,
one_headed_key_values = False,
tie_key_values = False,
rotary_position = False,
rotary_rel_dist = False,
norm_gated_scale = False,
use_egnn = False,
egnn_hidden_dim = 32,
egnn_weights_clamp_value = None,
egnn_feedforward = False,
hidden_fiber_dict = None,
out_fiber_dict = None
):
super().__init__()
dim_in = default(dim_in, dim)
self.dim_in = cast_tuple(dim_in, input_degrees)
self.dim = dim
# token embedding
self.token_emb = nn.Embedding(num_tokens, dim) if exists(num_tokens) else None
# positional embedding
self.num_positions = num_positions
self.pos_emb = nn.Embedding(num_positions, dim) if exists(num_positions) else None
self.rotary_rel_dist = rotary_rel_dist
self.rotary_position = rotary_position
self.rotary_pos_emb = None
if rotary_position or rotary_rel_dist:
num_rotaries = int(rotary_position) + int(rotary_rel_dist)
self.rotary_pos_emb = SinusoidalEmbeddings(dim_head // num_rotaries)
# edges
assert not (exists(num_edge_tokens) and not exists(edge_dim)), 'edge dimension (edge_dim) must be supplied if SE3 transformer is to have edge tokens'
self.edge_emb = nn.Embedding(num_edge_tokens, edge_dim) if exists(num_edge_tokens) else None
self.has_edges = exists(edge_dim) and edge_dim > 0
self.input_degrees = input_degrees
assert not (exists(num_adj_degrees) and num_adj_degrees < 1), 'make sure num_adj_degrees is at least 1'
self.num_degrees = num_degrees if exists(num_degrees) else (max(hidden_fiber_dict.keys()) + 1)
output_degrees = output_degrees if not use_egnn else None
self.output_degrees = output_degrees
# whether to differentiate through basis, needed for alphafold2
self.differentiable_coors = differentiable_coors
# neighbors hyperparameters
self.valid_radius = valid_radius
self.num_neighbors = num_neighbors
# sparse neighbors, derived from adjacency matrix or edges being passed in
self.attend_sparse_neighbors = attend_sparse_neighbors
self.max_sparse_neighbors = max_sparse_neighbors
# adjacent neighbor derivation and embed
self.num_adj_degrees = num_adj_degrees
self.adj_emb = nn.Embedding(num_adj_degrees + 1, adj_dim) if exists(num_adj_degrees) and adj_dim > 0 else None
edge_dim = (edge_dim if self.has_edges else 0) + (adj_dim if exists(self.adj_emb) else 0)
# define fibers and dimensionality
dim_in = default(dim_in, dim)
dim_out = default(dim_out, dim)
assert exists(num_degrees) or exists(hidden_fiber_dict), 'either num_degrees or hidden_fiber_dict must be specified'
fiber_in = Fiber.create(input_degrees, dim_in)
if exists(hidden_fiber_dict):
fiber_hidden = Fiber(hidden_fiber_dict)
elif exists(num_degrees):
fiber_hidden = Fiber.create(num_degrees, dim)
if exists(out_fiber_dict):
fiber_out = Fiber(out_fiber_dict)
self.output_degrees = max(out_fiber_dict.keys()) + 1
elif exists(output_degrees):
fiber_out = Fiber.create(output_degrees, dim_out)
else:
fiber_out = None
conv_kwargs = dict(edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist, num_fourier_features = rel_dist_num_fourier_features, splits = splits)
# causal
assert not (causal and not attend_self), 'attending to self must be turned on if in autoregressive mode (for the first token)'
self.causal = causal
# main network
self.conv_in = ConvSE3(fiber_in, fiber_hidden, **conv_kwargs)
# pre-convs
self.convs = nn.ModuleList([])
for _ in range(num_conv_layers):
self.convs.append(nn.ModuleList([
ConvSE3(fiber_hidden, fiber_hidden, **conv_kwargs),
NormSE3(fiber_hidden, gated_scale = norm_gated_scale)
]))
# global features
self.accept_global_feats = exists(global_feats_dim)
assert not (reversible and self.accept_global_feats), 'reversibility and global features are not compatible'
# trunk
self.attend_self = attend_self
default_attention_klass = OneHeadedKVAttentionSE3 if one_headed_key_values else AttentionSE3
if use_egnn:
self.net = EGnnNetwork(fiber = fiber_hidden, depth = depth, edge_dim = edge_dim, hidden_dim = egnn_hidden_dim, coor_weights_clamp_value = egnn_weights_clamp_value, feedforward = egnn_feedforward)
else:
layers = nn.ModuleList([])
for ind in range(depth):
attention_klass = default_attention_klass
layers.append(nn.ModuleList([
AttentionBlockSE3(fiber_hidden, heads = heads, dim_head = dim_head, attend_self = attend_self, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist, rel_dist_num_fourier_features = rel_dist_num_fourier_features, use_null_kv = use_null_kv, splits = splits, global_feats_dim = global_feats_dim, linear_proj_keys = linear_proj_keys, attention_klass = attention_klass, tie_key_values = tie_key_values, norm_gated_scale = norm_gated_scale),
FeedForwardBlockSE3(fiber_hidden, norm_gated_scale = norm_gated_scale)
]))
execution_class = ReversibleSequence if reversible else SequentialSequence
self.net = execution_class(layers)
# out
self.conv_out = ConvSE3(fiber_hidden, fiber_out, **conv_kwargs) if exists(fiber_out) else None
self.norm = NormSE3(fiber_out, gated_scale = norm_gated_scale, nonlin = nn.Identity()) if (norm_out or reversible) and exists(fiber_out) else nn.Identity()
final_fiber = default(fiber_out, fiber_hidden)
self.linear_out = LinearSE3(
final_fiber,
Fiber(list(map(lambda t: FiberEl(degrees = t[0], dim = 1), final_fiber)))
) if reduce_dim_out else None
def forward(
self,
feats,
coors,
mask = None,
adj_mat = None,
edges = None,
return_type = None,
return_pooled = False,
neighbor_mask = None,
global_feats = None
):
assert not (self.accept_global_feats ^ exists(global_feats)), 'global_feats must be passed in if and only if the model was initialized with global_feats_dim'
_mask = mask
if self.output_degrees == 1:
return_type = 0
if exists(self.token_emb):
feats = self.token_emb(feats)
if exists(self.pos_emb):
assert feats.shape[1] <= self.num_positions, 'feature sequence length must not exceed the number of positions given at init'
pos_emb = self.pos_emb(torch.arange(feats.shape[1], device = feats.device))
feats += rearrange(pos_emb, 'n d -> () n d')
assert not (self.attend_sparse_neighbors and not exists(adj_mat)), 'adjacency matrix (adj_mat) or edges (edges) must be passed in'
assert not (self.has_edges and not exists(edges)), 'edge embedding (num_edge_tokens & edge_dim) must be supplied if one were to train on edge types'
if torch.is_tensor(feats):
feats = {'0': feats[..., None]}
if torch.is_tensor(global_feats):
global_feats = {'0': global_feats[..., None]}
b, n, d, *_, device = *feats['0'].shape, feats['0'].device
assert d == self.dim_in[0], f'feature dimension {d} must be equal to dimension given at init {self.dim_in[0]}'
assert set(map(int, feats.keys())) == set(range(self.input_degrees)), f'input must have {self.input_degrees} degrees'
num_degrees, neighbors, max_sparse_neighbors, valid_radius = self.num_degrees, self.num_neighbors, self.max_sparse_neighbors, self.valid_radius
assert self.attend_sparse_neighbors or neighbors > 0, 'you must either attend to sparsely bonded neighbors, or set number of locally attended neighbors to be greater than 0'
# se3 transformer by default cannot have a node attend to itself
exclude_self_mask = rearrange(~torch.eye(n, dtype = torch.bool, device = device), 'i j -> () i j')
remove_self = lambda t: t.masked_select(exclude_self_mask).reshape(b, n, n - 1)
get_max_value = lambda t: torch.finfo(t.dtype).max
# create N-degrees adjacent matrix from 1st degree connections
if exists(self.num_adj_degrees):
if len(adj_mat.shape) == 2:
adj_mat = repeat(adj_mat.clone(), 'i j -> b i j', b = b)
adj_indices = adj_mat.clone().long()
for ind in range(self.num_adj_degrees - 1):
degree = ind + 2
next_degree_adj_mat = (adj_mat.float() @ adj_mat.float()) > 0
next_degree_mask = (next_degree_adj_mat.float() - adj_mat.float()).bool()
adj_indices = adj_indices.masked_fill(next_degree_mask, degree)
adj_mat = next_degree_adj_mat.clone()
adj_indices = adj_indices.masked_select(exclude_self_mask).reshape(b, n, n - 1)
# calculate sparsely connected neighbors
sparse_neighbor_mask = None
num_sparse_neighbors = 0
if self.attend_sparse_neighbors:
assert exists(adj_mat), 'adjacency matrix must be passed in (keyword argument adj_mat)'
if exists(adj_mat):
if len(adj_mat.shape) == 2:
adj_mat = repeat(adj_mat, 'i j -> b i j', b = b)
adj_mat = remove_self(adj_mat)
adj_mat_values = adj_mat.float()
adj_mat_max_neighbors = adj_mat_values.sum(dim = -1).max().item()
if max_sparse_neighbors < adj_mat_max_neighbors:
noise = torch.empty_like(adj_mat_values).uniform_(-0.01, 0.01)
adj_mat_values += noise
num_sparse_neighbors = int(min(max_sparse_neighbors, adj_mat_max_neighbors))
values, indices = adj_mat_values.topk(num_sparse_neighbors, dim = -1)
sparse_neighbor_mask = torch.zeros_like(adj_mat_values).scatter_(-1, indices, values)
sparse_neighbor_mask = sparse_neighbor_mask > 0.5
# exclude edge of token to itself
indices = repeat(torch.arange(n, device = device), 'j -> b i j', b = b, i = n)
rel_pos = rearrange(coors, 'b n d -> b n () d') - rearrange(coors, 'b n d -> b () n d')
indices = indices.masked_select(exclude_self_mask).reshape(b, n, n - 1)
rel_pos = rel_pos.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, 3)
if exists(mask):
mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
mask = mask.masked_select(exclude_self_mask).reshape(b, n, n - 1)
if exists(edges):
if exists(self.edge_emb):
edges = self.edge_emb(edges)
edges = edges.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, -1)
if exists(self.adj_emb):
adj_emb = self.adj_emb(adj_indices)
edges = torch.cat((edges, adj_emb), dim = -1) if exists(edges) else adj_emb
rel_dist = rel_pos.norm(dim = -1)
# rel_dist gets modified using adjacency or neighbor mask
modified_rel_dist = rel_dist.clone()
max_value = get_max_value(modified_rel_dist) # for masking out nodes from being considered as neighbors
# neighbors
if exists(neighbor_mask):
neighbor_mask = remove_self(neighbor_mask)
max_neighbors = neighbor_mask.sum(dim = -1).max().item()
if max_neighbors > neighbors:
print(f'neighbor_mask shows maximum number of neighbors as {max_neighbors} but specified number of neighbors is {neighbors}')
modified_rel_dist = modified_rel_dist.masked_fill(~neighbor_mask, max_value)
# use sparse neighbor mask to assign priority of bonded
if exists(sparse_neighbor_mask):
modified_rel_dist = modified_rel_dist.masked_fill(sparse_neighbor_mask, 0.)
# mask out future nodes to high distance if causal turned on
if self.causal:
causal_mask = torch.ones(n, n - 1, device = device).triu().bool()
modified_rel_dist = modified_rel_dist.masked_fill(causal_mask[None, ...], max_value)
# if number of local neighbors by distance is set to 0, then only fetch the sparse neighbors defined by adjacency matrix
if neighbors == 0:
valid_radius = 0
# get neighbors and neighbor mask, excluding self
neighbors = int(min(neighbors, n - 1))
total_neighbors = int(neighbors + num_sparse_neighbors)
assert total_neighbors > 0, 'you must be fetching at least 1 neighbor'
total_neighbors = int(min(total_neighbors, n - 1)) # make sure total neighbors does not exceed the length of the sequence itself
dist_values, nearest_indices = modified_rel_dist.topk(total_neighbors, dim = -1, largest = False)
neighbor_mask = dist_values <= valid_radius
neighbor_rel_dist = batched_index_select(rel_dist, nearest_indices, dim = 2)
neighbor_rel_pos = batched_index_select(rel_pos, nearest_indices, dim = 2)
neighbor_indices = batched_index_select(indices, nearest_indices, dim = 2)
if exists(mask):
neighbor_mask = neighbor_mask & batched_index_select(mask, nearest_indices, dim = 2)
if exists(edges):
edges = batched_index_select(edges, nearest_indices, dim = 2)
# calculate rotary pos emb
rotary_pos_emb = None
rotary_query_pos_emb = None
rotary_key_pos_emb = None
if self.rotary_position:
seq = torch.arange(n, device = device)
seq_pos_emb = self.rotary_pos_emb(seq)
self_indices = torch.arange(neighbor_indices.shape[1], device = device)
self_indices = repeat(self_indices, 'i -> b i ()', b = b)
neighbor_indices_with_self = torch.cat((self_indices, neighbor_indices), dim = 2)
pos_emb = batched_index_select(seq_pos_emb, neighbor_indices_with_self, dim = 0)
rotary_key_pos_emb = pos_emb
rotary_query_pos_emb = repeat(seq_pos_emb, 'n d -> b n d', b = b)
if self.rotary_rel_dist:
neighbor_rel_dist_with_self = F.pad(neighbor_rel_dist, (1, 0), value = 0) * 1e2
rel_dist_pos_emb = self.rotary_pos_emb(neighbor_rel_dist_with_self)
rotary_key_pos_emb = safe_cat(rotary_key_pos_emb, rel_dist_pos_emb, dim = -1)
query_dist = torch.zeros(n, device = device)
query_pos_emb = self.rotary_pos_emb(query_dist)
query_pos_emb = repeat(query_pos_emb, 'n d -> b n d', b = b)
rotary_query_pos_emb = safe_cat(rotary_query_pos_emb, query_pos_emb, dim = -1)
if exists(rotary_query_pos_emb) and exists(rotary_key_pos_emb):
rotary_pos_emb = (rotary_query_pos_emb, rotary_key_pos_emb)
# calculate basis
basis = get_basis(neighbor_rel_pos, num_degrees - 1, differentiable = self.differentiable_coors)
# main logic
edge_info = (neighbor_indices, neighbor_mask, edges)
x = feats
# project in
x = self.conv_in(x, edge_info, rel_dist = neighbor_rel_dist, basis = basis)
# preconvolution layers
for conv, nonlin in self.convs:
x = nonlin(x)
x = conv(x, edge_info, rel_dist = neighbor_rel_dist, basis = basis)
# transformer layers
x = self.net(x, edge_info = edge_info, rel_dist = neighbor_rel_dist, basis = basis, global_feats = global_feats, pos_emb = rotary_pos_emb, mask = _mask)
# project out
if exists(self.conv_out):
x = self.conv_out(x, edge_info, rel_dist = neighbor_rel_dist, basis = basis)
# norm
x = self.norm(x)
# reduce dim if specified
if exists(self.linear_out):
x = self.linear_out(x)
x = map_values(lambda t: t.squeeze(dim = 2), x)
if return_pooled:
mask_fn = (lambda t: masked_mean(t, _mask, dim = 1)) if exists(_mask) else (lambda t: t.mean(dim = 1))
x = map_values(mask_fn, x)
if '0' in x:
x['0'] = x['0'].squeeze(dim = -1)
if exists(return_type):
return x[str(return_type)]
return x
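# minimal usage sketch (argument values below are illustrative assumptions, not taken from this file):
# model = SE3Transformer(dim = 64, heads = 8, depth = 2, dim_head = 16, num_degrees = 4, valid_radius = 10)
# feats = torch.randn(1, 32, 64)
# coors = torch.randn(1, 32, 3)
# mask = torch.ones(1, 32).bool()
# out = model(feats, coors, mask = mask, return_type = 0) # (1, 32, 64)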
| se3-transformer-pytorch-main | se3_transformer_pytorch/se3_transformer_pytorch.py |
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# helpers
def map_values(fn, x):
out = {}
for (k, v) in x.items():
out[k] = fn(v)
return out
def dict_chunk(x, chunks, dim):
out1 = {}
out2 = {}
for (k, v) in x.items():
c1, c2 = v.chunk(chunks, dim = dim)
out1[k] = c1
out2[k] = c2
return out1, out2
def dict_sum(x, y):
out = {}
for k in x.keys():
out[k] = x[k] + y[k]
return out
def dict_subtract(x, y):
out = {}
for k in x.keys():
out[k] = x[k] - y[k]
return out
def dict_cat(x, y, dim):
out = {}
for k, v1 in x.items():
v2 = y[k]
out[k] = torch.cat((v1, v2), dim = dim)
return out
def dict_set_(x, key, value):
for k, v in x.items():
setattr(v, key, value)
def dict_backwards_(outputs, grad_tensors):
for k, v in outputs.items():
torch.autograd.backward(v, grad_tensors[k], retain_graph = True)
def dict_del_(x):
for k, v in x.items():
del v
del x
def values(d):
return [v for _, v in d.items()]
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
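# reversible residual block over dictionaries of per-degree features: y1 = x1 + f(x2), y2 = x2 + g(y1), so the inputs can be recomputed from the outputs during the backward pass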
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, **kwargs):
training = self.training
x1, x2 = dict_chunk(x, 2, dim = -1)
y1, y2 = None, None
with torch.no_grad():
y1 = dict_sum(x1, self.f(x2, record_rng = training, **kwargs))
y2 = dict_sum(x2, self.g(y1, record_rng = training))
return dict_cat(y1, y2, dim = -1)
def backward_pass(self, y, dy, **kwargs):
y1, y2 = dict_chunk(y, 2, dim = -1)
dict_del_(y)
dy1, dy2 = dict_chunk(dy, 2, dim = -1)
dict_del_(dy)
with torch.enable_grad():
dict_set_(y1, 'requires_grad', True)
gy1 = self.g(y1, set_rng = True)
dict_backwards_(gy1, dy2)
with torch.no_grad():
x2 = dict_subtract(y2, gy1)
dict_del_(y2)
dict_del_(gy1)
dx1 = dict_sum(dy1, map_values(lambda t: t.grad, y1))
dict_del_(dy1)
dict_set_(y1, 'grad', None)
with torch.enable_grad():
dict_set_(x2, 'requires_grad', True)
fx2 = self.f(x2, set_rng = True, **kwargs)
dict_backwards_(fx2, dx1)
with torch.no_grad():
x1 = dict_subtract(y1, fx2)
dict_del_(y1)
dict_del_(fx2)
dx2 = dict_sum(dy2, map_values(lambda t: t.grad, x2))
dict_del_(dy2)
dict_set_(x2, 'grad', None)
x2 = map_values(lambda t: t.detach(), x2)
x = dict_cat(x1, x2, dim = -1)
dx = dict_cat(dx1, dx2, dim = -1)
return x, dx
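# custom autograd Function that runs all reversible blocks on one concatenated tensor, keeping only the final activations and reconstructing the intermediate ones in backward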
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
input_keys = kwargs.pop('input_keys')
split_dims = kwargs.pop('split_dims')
input_values = x.split(split_dims, dim = -1)
x = dict(zip(input_keys, input_values))
ctx.kwargs = kwargs
ctx.split_dims = split_dims
ctx.input_keys = input_keys
for block in blocks:
x = block(x, **kwargs)
ctx.y = map_values(lambda t: t.detach(), x)
ctx.blocks = blocks
x = torch.cat(values(x), dim = -1)
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
input_keys = ctx.input_keys
split_dims = ctx.split_dims
dy = dy.split(split_dims, dim = -1)
dy = dict(zip(input_keys, dy))
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
dy = torch.cat(values(dy), dim = -1)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, blocks):
super().__init__()
self.blocks = blocks
def forward(self, x, **kwargs):
for (attn, ff) in self.blocks:
x = attn(x, **kwargs)
x = ff(x)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks):
super().__init__()
self.blocks = nn.ModuleList([ReversibleBlock(f, g) for (f, g) in blocks])
def forward(self, x, **kwargs):
blocks = self.blocks
x = map_values(lambda t: torch.cat((t, t), dim = -1), x)
input_keys = x.keys()
split_dims = tuple(map(lambda t: t.shape[-1], x.values()))
block_kwargs = {'input_keys': input_keys, 'split_dims': split_dims, **kwargs}
x = torch.cat(values(x), dim = -1)
x = _ReversibleFunction.apply(x, blocks, block_kwargs)
x = dict(zip(input_keys, x.split(split_dims, dim = -1)))
x = map_values(lambda t: torch.stack(t.chunk(2, dim = -1)).mean(dim = 0), x)
return x
| se3-transformer-pytorch-main | se3_transformer_pytorch/reversible.py |
from se3_transformer_pytorch.se3_transformer_pytorch import SE3Transformer
| se3-transformer-pytorch-main | se3_transformer_pytorch/__init__.py |
import os
import sys
import time
import pickle
import gzip
import torch
import contextlib
from functools import wraps, lru_cache
from filelock import FileLock
from einops import rearrange
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def uniq(arr):
return list({el: True for el in arr}.keys())
def to_order(degree):
return 2 * degree + 1
def map_values(fn, d):
return {k: fn(v) for k, v in d.items()}
def safe_cat(arr, el, dim):
if not exists(arr):
return el
return torch.cat((arr, el), dim = dim)
def cast_tuple(val, depth):
return val if isinstance(val, tuple) else (val,) * depth
def broadcat(tensors, dim = -1):
num_tensors = len(tensors)
shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
shape_len = list(shape_lens)[0]
dim = (dim + shape_len) if dim < 0 else dim
dims = list(zip(*map(lambda t: list(t.shape), tensors)))
expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatenation'
max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
expanded_dims.insert(dim, (dim, dims[dim]))
expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
return torch.cat(tensors, dim = dim)
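# gathers `values` along `dim` using a batch of `indices`, broadcasting any trailing feature dimensions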
def batched_index_select(values, indices, dim = 1):
value_dims = values.shape[(dim + 1):]
values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices))
indices = indices[(..., *((None,) * len(value_dims)))]
indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims)
value_expand_len = len(indices_shape) - (dim + 1)
values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)]
value_expand_shape = [-1] * len(values.shape)
expand_slice = slice(dim, (dim + value_expand_len))
value_expand_shape[expand_slice] = indices.shape[expand_slice]
values = values.expand(*value_expand_shape)
dim += value_expand_len
return values.gather(dim, indices)
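# mean over `dim` counting only positions where `mask` is True (note: zeroes out masked entries of `tensor` in place)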
def masked_mean(tensor, mask, dim = -1):
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor.masked_fill_(~mask, 0.)
total_el = mask.sum(dim = dim)
mean = tensor.sum(dim = dim) / total_el.clamp(min = 1.)
mean.masked_fill_(total_el == 0, 0.)
return mean
def rand_uniform(size, min_val, max_val):
return torch.empty(size).uniform_(min_val, max_val)
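# generator that splits `arr` into `splits` nearly equal chunks along `dim`, spreading any remainder one element at a time across the leading chunks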
def fast_split(arr, splits, dim=0):
axis_len = arr.shape[dim]
splits = min(axis_len, max(splits, 1))
chunk_size = axis_len // splits
remainder = axis_len - chunk_size * splits
s = 0
for i in range(splits):
adjust, remainder = 1 if remainder > 0 else 0, remainder - 1
yield torch.narrow(arr, dim, s, chunk_size + adjust)
s += chunk_size + adjust
def fourier_encode(x, num_encodings = 4, include_self = True, flatten = True):
x = x.unsqueeze(-1)
device, dtype, orig_x = x.device, x.dtype, x
scales = 2 ** torch.arange(num_encodings, device = device, dtype = dtype)
x = x / scales
x = torch.cat([x.sin(), x.cos()], dim=-1)
x = torch.cat((x, orig_x), dim = -1) if include_self else x
x = rearrange(x, 'b m n ... -> b m n (...)') if flatten else x
return x
# default dtype context manager
@contextlib.contextmanager
def torch_default_dtype(dtype):
prev_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
yield
torch.set_default_dtype(prev_dtype)
def cast_torch_tensor(fn):
@wraps(fn)
def inner(t):
if not torch.is_tensor(t):
t = torch.tensor(t, dtype = torch.get_default_dtype())
return fn(t)
return inner
# benchmark tool
def benchmark(fn):
def inner(*args, **kwargs):
start = time.time()
res = fn(*args, **kwargs)
diff = time.time() - start
return diff, res
return inner
# caching functions
def cache(cache, key_fn):
def cache_inner(fn):
@wraps(fn)
def inner(*args, **kwargs):
key_name = key_fn(*args, **kwargs)
if key_name in cache:
return cache[key_name]
res = fn(*args, **kwargs)
cache[key_name] = res
return res
return inner
return cache_inner
# cache in directory
def cache_dir(dirname, maxsize=128):
'''
Cache a function with a directory
:param dirname: the directory path
:param maxsize: maximum size of the RAM cache (there is no limit for the directory cache)
'''
def decorator(func):
@lru_cache(maxsize=maxsize)
@wraps(func)
def wrapper(*args, **kwargs):
if not exists(dirname):
return func(*args, **kwargs)
os.makedirs(dirname, exist_ok = True)
indexfile = os.path.join(dirname, "index.pkl")
lock = FileLock(os.path.join(dirname, "mutex"))
with lock:
index = {}
if os.path.exists(indexfile):
with open(indexfile, "rb") as file:
index = pickle.load(file)
key = (args, frozenset(kwargs.items()), func.__defaults__)
if key in index:
filename = index[key]
else:
index[key] = filename = f"{len(index)}.pkl.gz"
with open(indexfile, "wb") as file:
pickle.dump(index, file)
filepath = os.path.join(dirname, filename)
if os.path.exists(filepath):
with lock:
with gzip.open(filepath, "rb") as file:
result = pickle.load(file)
return result
print(f"compute {filename}... ", end="", flush = True)
result = func(*args, **kwargs)
print(f"save {filename}... ", end="", flush = True)
with lock:
with gzip.open(filepath, "wb") as file:
pickle.dump(result, file)
print("done")
return result
return wrapper
return decorator
| se3-transformer-pytorch-main | se3_transformer_pytorch/utils.py |
import os
import numpy as np
import torch
from torch import sin, cos, atan2, acos
from math import pi
from pathlib import Path
from functools import wraps
from se3_transformer_pytorch.utils import exists, default, cast_torch_tensor, to_order
from se3_transformer_pytorch.spherical_harmonics import get_spherical_harmonics, clear_spherical_harmonics_cache
DATA_PATH = path = Path(os.path.dirname(__file__)) / 'data'
try:
path = DATA_PATH / 'J_dense.pt'
Jd = torch.load(str(path))
except:
path = DATA_PATH / 'J_dense.npy'
Jd_np = np.load(str(path), allow_pickle = True)
Jd = list(map(torch.from_numpy, Jd_np))
def wigner_d_matrix(degree, alpha, beta, gamma, dtype = None, device = None):
"""Create wigner D matrices for batch of ZYZ Euler anglers for degree l."""
J = Jd[degree].type(dtype).to(device)
order = to_order(degree)
x_a = z_rot_mat(alpha, degree)
x_b = z_rot_mat(beta, degree)
x_c = z_rot_mat(gamma, degree)
res = x_a @ J @ x_b @ J @ x_c
return res.view(order, order)
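# (2l + 1) x (2l + 1) representation of a rotation about the z-axis in the real spherical harmonics basis of degree l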
def z_rot_mat(angle, l):
device, dtype = angle.device, angle.dtype
order = to_order(l)
m = angle.new_zeros((order, order))
inds = torch.arange(0, order, 1, dtype=torch.long, device=device)
reversed_inds = torch.arange(2 * l, -1, -1, dtype=torch.long, device=device)
frequencies = torch.arange(l, -l - 1, -1, dtype=dtype, device=device)[None]
m[inds, reversed_inds] = sin(frequencies * angle[None])
m[inds, inds] = cos(frequencies * angle[None])
return m
def irr_repr(order, alpha, beta, gamma, dtype = None):
"""
irreducible representation of SO3
- compatible with compose and spherical_harmonics
"""
cast_ = cast_torch_tensor(lambda t: t)
dtype = default(dtype, torch.get_default_dtype())
alpha, beta, gamma = map(cast_, (alpha, beta, gamma))
return wigner_d_matrix(order, alpha, beta, gamma, dtype = dtype)
@cast_torch_tensor
def rot_z(gamma):
'''
Rotation around Z axis
'''
return torch.tensor([
[cos(gamma), -sin(gamma), 0],
[sin(gamma), cos(gamma), 0],
[0, 0, 1]
], dtype=gamma.dtype)
@cast_torch_tensor
def rot_y(beta):
'''
Rotation around Y axis
'''
return torch.tensor([
[cos(beta), 0, sin(beta)],
[0, 1, 0],
[-sin(beta), 0, cos(beta)]
], dtype=beta.dtype)
@cast_torch_tensor
def x_to_alpha_beta(x):
'''
Convert point (x, y, z) on the sphere into (alpha, beta)
'''
x = x / torch.norm(x)
beta = acos(x[2])
alpha = atan2(x[1], x[0])
return (alpha, beta)
def rot(alpha, beta, gamma):
'''
ZYZ Euler angles rotation
'''
return rot_z(alpha) @ rot_y(beta) @ rot_z(gamma)
def compose(a1, b1, c1, a2, b2, c2):
"""
(a, b, c) = (a1, b1, c1) composed with (a2, b2, c2)
"""
comp = rot(a1, b1, c1) @ rot(a2, b2, c2)
xyz = comp @ torch.tensor([0, 0, 1.])
a, b = x_to_alpha_beta(xyz)
rotz = rot(0, -b, -a) @ comp
c = atan2(rotz[1, 0], rotz[0, 0])
return a, b, c
def spherical_harmonics(order, alpha, beta, dtype = None):
return get_spherical_harmonics(order, theta = (pi - beta), phi = alpha)
| se3-transformer-pytorch-main | se3_transformer_pytorch/irr_repr.py |
import torch
from torch import nn, einsum
from einops import rearrange, repeat
class SinusoidalEmbeddings(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, t):
freqs = t[..., None].float() * self.inv_freq[None, :]
return repeat(freqs, '... d -> ... (d r)', r = 2)
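# rotary embedding helpers - the rotation is applied over the feature dimension just before the trailing (order m) axis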
def rotate_half(x):
x = rearrange(x, '... (d j) m -> ... d j m', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -2)
def apply_rotary_pos_emb(t, freqs):
rot_dim = freqs.shape[-2]
t, t_pass = t[..., :rot_dim, :], t[..., rot_dim:, :]
t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
return torch.cat((t, t_pass), dim = -2)
| se3-transformer-pytorch-main | se3_transformer_pytorch/rotary.py |
from setuptools import setup, find_packages
setup(
name = 'halonet-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'HaloNet - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/halonet-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| halonet-pytorch-main | setup.py |
from halonet_pytorch.halonet_pytorch import HaloAttention
| halonet-pytorch-main | halonet_pytorch/__init__.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# relative positional embedding
def to(x):
return {'device': x.device, 'dtype': x.dtype}
def pair(x):
return (x, x) if not isinstance(x, tuple) else x
def expand_dim(t, dim, k):
t = t.unsqueeze(dim = dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
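# converts relative position logits of width (2r - 1) into absolute position logits of width r, via the standard pad-and-reshape trick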
def rel_to_abs(x):
b, l, m = x.shape
r = (m + 1) // 2
col_pad = torch.zeros((b, l, 1), **to(x))
x = torch.cat((x, col_pad), dim = 2)
flat_x = rearrange(x, 'b l c -> b (l c)')
flat_pad = torch.zeros((b, m - l), **to(x))
flat_x_padded = torch.cat((flat_x, flat_pad), dim = 1)
final_x = flat_x_padded.reshape(b, l + 1, m)
final_x = final_x[:, :l, -r:]
return final_x
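# computes 1d relative position logits between queries and relative key embeddings, then expands them along the other spatial dimension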
def relative_logits_1d(q, rel_k):
b, h, w, _ = q.shape
r = (rel_k.shape[0] + 1) // 2
logits = einsum('b x y d, r d -> b x y r', q, rel_k)
logits = rearrange(logits, 'b x y r -> (b x) y r')
logits = rel_to_abs(logits)
logits = logits.reshape(b, h, w, r)
logits = expand_dim(logits, dim = 2, k = r)
return logits
class RelPosEmb(nn.Module):
def __init__(
self,
block_size,
rel_size,
dim_head
):
super().__init__()
height = width = rel_size
scale = dim_head ** -0.5
self.block_size = block_size
self.rel_height = nn.Parameter(torch.randn(height * 2 - 1, dim_head) * scale)
self.rel_width = nn.Parameter(torch.randn(width * 2 - 1, dim_head) * scale)
def forward(self, q):
block = self.block_size
q = rearrange(q, 'b (x y) c -> b x y c', x = block)
rel_logits_w = relative_logits_1d(q, self.rel_width)
rel_logits_w = rearrange(rel_logits_w, 'b x i y j-> b (x y) (i j)')
q = rearrange(q, 'b x y d -> b y x d')
rel_logits_h = relative_logits_1d(q, self.rel_height)
rel_logits_h = rearrange(rel_logits_h, 'b x i y j -> b (y x) (j i)')
return rel_logits_w + rel_logits_h
# classes
class HaloAttention(nn.Module):
def __init__(
self,
*,
dim,
block_size,
halo_size,
dim_head = 64,
heads = 8
):
super().__init__()
assert halo_size > 0, 'halo size must be greater than 0'
self.dim = dim
self.heads = heads
self.scale = dim_head ** -0.5
self.block_size = block_size
self.halo_size = halo_size
inner_dim = dim_head * heads
self.rel_pos_emb = RelPosEmb(
block_size = block_size,
rel_size = block_size + (halo_size * 2),
dim_head = dim_head
)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x):
b, c, h, w, block, halo, heads, device = *x.shape, self.block_size, self.halo_size, self.heads, x.device
assert h % block == 0 and w % block == 0, 'fmap dimensions must be divisible by the block size'
assert c == self.dim, f'channels for input ({c}) must equal the dimension given at init ({self.dim})'
# get block neighborhoods, and prepare a halo-ed version (blocks with padding) for deriving key values
q_inp = rearrange(x, 'b c (h p1) (w p2) -> (b h w) (p1 p2) c', p1 = block, p2 = block)
kv_inp = F.unfold(x, kernel_size = block + halo * 2, stride = block, padding = halo)
kv_inp = rearrange(kv_inp, 'b (c j) i -> (b i) j c', c = c)
# derive queries, keys, values
q = self.to_q(q_inp)
k, v = self.to_kv(kv_inp).chunk(2, dim = -1)
# split heads
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = heads), (q, k, v))
# scale
q *= self.scale
# attention
sim = einsum('b i d, b j d -> b i j', q, k)
# add relative positional bias
sim += self.rel_pos_emb(q)
# mask out padding (in the paper, they claim to not need masks, but what about padding?)
mask = torch.ones(1, 1, h, w, device = device)
mask = F.unfold(mask, kernel_size = block + (halo * 2), stride = block, padding = halo)
mask = repeat(mask, '() j i -> (b i h) () j', b = b, h = heads)
mask = mask.bool()
max_neg_value = -torch.finfo(sim.dtype).max
sim.masked_fill_(~mask, max_neg_value) # mask is True at valid positions, so fill the padded (False) positions
# attention
attn = sim.softmax(dim = -1)
# aggregate
out = einsum('b i j, b j d -> b i d', attn, v)
# merge and combine heads
out = rearrange(out, '(b h) n d -> b n (h d)', h = heads)
out = self.to_out(out)
# merge blocks back to original feature map
out = rearrange(out, '(b h w) (p1 p2) c -> b c (h p1) (w p2)', b = b, h = (h // block), w = (w // block), p1 = block, p2 = block)
return out
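# minimal usage sketch (argument values below are illustrative assumptions, not taken from this file):
# attn = HaloAttention(dim = 512, block_size = 8, halo_size = 4, dim_head = 64, heads = 8)
# fmap = torch.randn(1, 512, 32, 32)
# out = attn(fmap) # (1, 512, 32, 32)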
| halonet-pytorch-main | halonet_pytorch/halonet_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'isab-pytorch',
packages = find_packages(),
version = '0.2.3',
license='MIT',
description = 'Induced Set Attention Block - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/isab-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism'
],
install_requires=[
'torch',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| isab-pytorch-main | setup.py |