or4cl3ai/Aiden_t5
Text Generation
python_code stringlengths 0–1.02M | repo_name stringlengths 9–48 | file_path stringlengths 5–114 |
---|---|---|
from setuptools import setup, find_packages
setup(
name = 'Mega-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.0',
license='MIT',
description = 'Mega - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/Mega-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism',
'exponential moving average',
'long range arena'
],
install_requires=[
'einops>=0.4',
'scipy',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| Mega-pytorch-main | setup.py |
from mega_pytorch.mega_pytorch import Mega
from mega_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 512
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = Mega(
num_tokens = 256,
dim = 512,
depth = 8
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
x = np.array(np.frombuffer(file.read(int(95e6)), dtype = np.uint8))
train_x, valid_x = np.split(x, [int(90e6)])
data_train, data_val = torch.from_numpy(train_x), torch.from_numpy(valid_x)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
print(f"\n\n {prime} \n\n {'-' * 80} \n")
sample = model.generate(inp[None, ...], GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str + "\n\n")
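# usage note: this script expects the enwik8 dump at ./data/enwik8.gz and a CUDA capable GPU,
# since the model and the sampled batches are moved to the GPU with .cuda() above;
# run it with `python train.py`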
| Mega-pytorch-main | train.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.fft import rfft, irfft
from einops import rearrange
from einops.layers.torch import Rearrange
from scipy.fftpack import next_fast_len
# functions
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def default(val, d):
return val if exists(val) else d
def append_dims(x, num_dims):
if num_dims <= 0:
return x
return x.view(*x.shape, *((1,) * num_dims))
def conv1d_fft(x, weights, dim = -2, weight_dim = -1):
# O(N log N) 1d convolution via FFT (convolution theorem)
assert weight_dim >= dim
N = x.shape[dim]
M = weights.shape[weight_dim]
fast_len = next_fast_len(N + M - 1)
f_x = rfft(x, n = fast_len, dim = dim)
f_weight = rfft(weights, n = fast_len, dim = weight_dim)
f_v_weight = f_x * append_dims(f_weight.conj(), weight_dim - dim)
out = irfft(f_v_weight, fast_len, dim = dim)
out = out.roll(-1, dims = (dim,))
indices = torch.arange(start = fast_len - N, end = fast_len, dtype = torch.long, device = x.device)
out = out.index_select(dim, indices)
return out
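# a minimal shape sketch of conv1d_fft as it is used by MultiHeadedEMA further down
# (sizes chosen for illustration): x carries the sequence on dim -3 as (batch, seq, heads, dim)
# and the kernel carries it on dim -2 as (seq, heads); the output keeps the shape of x
def _conv1d_fft_demo():
    x = torch.randn(2, 16, 8, 4)
    K = torch.randn(16, 8)
    out = conv1d_fft(x, K, dim = -3, weight_dim = -2)
    assert out.shape == x.shape
    return out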
# positional bias for single-headed attention
class T5RelativePositionBias(nn.Module):
def __init__(
self,
scale,
causal = False,
num_buckets = 32,
max_distance = 128
):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, 1)
@staticmethod
def _relative_position_bucket(
relative_position,
causal = True,
num_buckets = 32,
max_distance = 128
):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, x):
i, j, device = *x.shape[-2:], x.device
q_pos = torch.arange(i, dtype = torch.long, device = device)
k_pos = torch.arange(j, dtype = torch.long, device = device)
rel_pos = rearrange(k_pos, 'j -> 1 j') - rearrange(q_pos, 'i -> i 1')
rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j 1 -> i j')
return bias * self.scale
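# a minimal shape sketch (sizes chosen for illustration): given a (batch, i, j) similarity
# tensor, the module returns a learned (i, j) bias that is broadcast over the batch
def _rel_pos_bias_demo():
    rel_bias = T5RelativePositionBias(scale = 1., causal = True)
    sim = torch.randn(2, 10, 10)
    bias = rel_bias(sim)
    assert bias.shape == (10, 10)
    return bias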
# classes
class LaplacianAttnFn(nn.Module):
def forward(self, x):
mu = math.sqrt(0.5)
std = math.sqrt((4 * math.pi) ** -1)
return (1 + torch.special.erf((x - mu) / (std * math.sqrt(2)))) * 0.5
class OffsetScale(nn.Module):
def __init__(self, dim, heads = 1):
super().__init__()
self.gamma = nn.Parameter(torch.ones(heads, dim))
self.beta = nn.Parameter(torch.zeros(heads, dim))
nn.init.normal_(self.gamma, std = 0.02)
def forward(self, x):
out = einsum('... d, h d -> ... h d', x, self.gamma) + self.beta
return out.unbind(dim = -2)
class SingleHeadedAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_qk,
dim_value,
causal = False,
laplacian_attn_fn = False
):
super().__init__()
self.causal = causal
self.laplacian_attn_fn = laplacian_attn_fn
self.attn_fn = partial(F.softmax, dim = -1) if not laplacian_attn_fn else LaplacianAttnFn()
self.rel_pos_bias = T5RelativePositionBias(causal = causal, scale = dim_qk ** 0.5)
self.to_qk = nn.Sequential(
nn.Linear(dim, dim_qk),
nn.SiLU()
)
self.offsetscale = OffsetScale(dim_qk, heads = 2)
self.to_v = nn.Sequential(
nn.Linear(dim, dim_value),
nn.SiLU()
)
def forward(self, x, v_input = None):
seq_len, dim, device, dtype = *x.shape[-2:], x.device, x.dtype
v_input = default(v_input, x)
qk, v = self.to_qk(x), self.to_v(v_input)
q, k = self.offsetscale(qk)
scale = (seq_len ** -1) if self.laplacian_attn_fn else (dim ** -0.5)
sim = einsum('b i d, b j d -> b i j', q, k) * scale
sim = sim + self.rel_pos_bias(sim)
if self.causal:
causal_mask = torch.ones((seq_len, seq_len), device = device, dtype = torch.bool).triu(1)
if self.causal and not self.laplacian_attn_fn:
# is softmax attention and using large negative value pre-softmax
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
attn = self.attn_fn(sim)
if self.causal and self.laplacian_attn_fn:
# if using laplacian attention function, zero out upper triangular with 0s
attn = attn.masked_fill(causal_mask, 0.)
return einsum('b i j, b j d -> b i d', attn, v)
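# a minimal usage sketch (dimensions chosen for illustration): single headed attention
# maps (batch, seq, dim) to (batch, seq, dim_value)
def _single_headed_attention_demo():
    attn = SingleHeadedAttention(dim = 128, dim_qk = 64, dim_value = 256, causal = True)
    x = torch.randn(2, 32, 128)
    out = attn(x)
    assert out.shape == (2, 32, 256)
    return out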
class MultiHeadedEMA(nn.Module):
def __init__(
self,
*,
dim,
heads,
bidirectional = False,
norm_mhesa_heads = False
):
super().__init__()
self.bidirectional = bidirectional
self.expansion = nn.Parameter(torch.randn(heads * (2 if bidirectional else 1), dim))
self.reduction = nn.Parameter(torch.randn(heads * (2 if bidirectional else 1), dim))
# learned alpha and dampening factors
self.alphas = nn.Parameter(torch.randn(heads))
self.dampen_factors = nn.Parameter(torch.randn(heads))
if bidirectional:
self.reverse_alphas = nn.Parameter(torch.randn(heads))
self.reverse_dampen_factors = nn.Parameter(torch.randn(heads))
self.heads = heads
self.norm_heads = nn.Identity()
if norm_mhesa_heads:
# https://arxiv.org/abs/2210.06423 - retnet used sub-ln with some success as groupnorm
self.norm_heads = nn.Sequential(
Rearrange('b n h d -> b (h d) n'),
nn.GroupNorm(heads, dim * heads),
Rearrange('b (h d) n -> b n h d', h = heads)
)
def forward(self, x):
device, seq_len = x.device, x.shape[1]
# project in and split heads
x = einsum('... d, h d -> ... h d', x, self.expansion)
if self.bidirectional:
x, x_reversed = x.chunk(2, dim = -2)
x_reversed = torch.flip(x_reversed, dims = (1,))
# weights derived from alphas (learned exponential smoothing decay rate)
def apply_learned_ema_with_damping(x, alphas, dampen_factors):
alphas = alphas.sigmoid()
dampen_factors = dampen_factors.sigmoid()
reversed_powers = torch.arange(seq_len - 1, -1, -1, device = device)
K = alphas * (((1 - alphas) * dampen_factors) ** rearrange(reversed_powers, '... l -> ... l 1'))
# conv1d fft O(nlog(n))
return conv1d_fft(x, K, dim = -3, weight_dim = -2)
x = apply_learned_ema_with_damping(x, self.alphas, self.dampen_factors)
if self.bidirectional:
x_reversed = apply_learned_ema_with_damping(x_reversed, self.reverse_alphas, self.reverse_dampen_factors)
x_reversed = torch.flip(x_reversed, dims = (1,))
x = torch.cat((x, x_reversed), dim = -2)
# maybe norm heads
x = self.norm_heads(x)
# combine heads and out
return einsum('... h d, h d -> ... d', x, self.reduction)
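# a minimal usage sketch (dimensions chosen for illustration): the multi-headed EMA is
# shape preserving, mixing each position with an exponentially decaying sum over its past
def _multi_headed_ema_demo():
    ema = MultiHeadedEMA(dim = 128, heads = 16)
    x = torch.randn(2, 64, 128)
    out = ema(x)
    assert out.shape == x.shape
    return out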
# Mega Layer
# Single headed Attention + Multi-headed EMA, then GRU-esque gating
class MegaLayer(nn.Module):
def __init__(
self,
*,
dim = 128,
ema_heads = 16,
attn_dim_qk = 64,
attn_dim_value = 256,
laplacian_attn_fn = False,
causal = True,
norm_mhesa_heads = False
):
super().__init__()
self.single_headed_attn = SingleHeadedAttention(
dim = dim,
dim_qk = attn_dim_qk,
dim_value = attn_dim_value,
causal = causal,
laplacian_attn_fn = laplacian_attn_fn
)
self.multi_headed_ema = MultiHeadedEMA(
dim = dim,
heads = ema_heads,
bidirectional = not causal,
norm_mhesa_heads = norm_mhesa_heads
)
self.to_reset_gate = nn.Sequential(
nn.Linear(dim, attn_dim_value),
nn.SiLU()
)
self.to_update_gate = nn.Sequential(
nn.Linear(dim, dim),
nn.Sigmoid()
)
# equation 14, for calculating H
self.Wh = nn.Parameter(torch.randn(dim, dim))
self.Uh = nn.Parameter(torch.randn(attn_dim_value, dim))
self.bh = nn.Parameter(torch.randn(dim))
def forward(self, x, residual = None):
residual = default(residual, x)
ema_output = self.multi_headed_ema(x)
attn_output = self.single_headed_attn(ema_output, x)
reset_gate = self.to_reset_gate(ema_output)
update_gate = self.to_update_gate(ema_output)
gated_attn_output = attn_output * reset_gate
# equation 14
H = F.silu(ema_output @ self.Wh + gated_attn_output @ self.Uh + self.bh)
# update gate
return update_gate * H + (1 - update_gate) * residual
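# a minimal usage sketch of one full Mega layer (multi-headed EMA -> single headed attention
# -> GRU-like gating); dimensions chosen for illustration, the layer preserves (batch, seq, dim)
def _mega_layer_demo():
    layer = MegaLayer(dim = 128, ema_heads = 16, attn_dim_qk = 64, attn_dim_value = 256, causal = True)
    x = torch.randn(1, 64, 128)
    out = layer(x)
    assert out.shape == x.shape
    return out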
# Mega
def FeedForward(dim, ff_mult):
dim_hidden = int(dim * ff_mult)
return nn.Sequential(
nn.Linear(dim, dim_hidden),
nn.GELU(),
nn.Linear(dim_hidden, dim)
)
class Mega(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
ff_mult = 2,
pre_norm = False,
**kwargs
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.pre_norm = pre_norm
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
MegaLayer(dim = dim, **kwargs),
nn.LayerNorm(dim),
FeedForward(dim = dim, ff_mult = ff_mult),
nn.LayerNorm(dim)
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim) if pre_norm else nn.Identity(),
nn.Linear(dim, num_tokens)
)
def forward(self, x):
pre_norm = self.pre_norm
post_norm = not self.pre_norm
x = self.token_emb(x)
for mega_layer, mega_norm, ff, ff_norm in self.layers:
mega_maybe_prenorm = mega_norm if pre_norm else identity
ff_maybe_prenorm = ff_norm if pre_norm else identity
mega_maybe_postnorm = mega_norm if post_norm else identity
ff_maybe_postnorm = ff_norm if post_norm else identity
x = mega_layer(mega_maybe_prenorm(x), x)
x = mega_maybe_postnorm(x)
x = ff(ff_maybe_prenorm(x)) + x
x = ff_maybe_postnorm(x)
return self.to_logits(x)
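# a minimal usage sketch of the full model (hyperparameters kept small for illustration):
# token ids in, per-position logits over the vocabulary out
def _mega_demo():
    model = Mega(num_tokens = 256, dim = 128, depth = 2)
    tokens = torch.randint(0, 256, (1, 64))
    logits = model(tokens)
    assert logits.shape == (1, 64, 256)
    return logits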
| Mega-pytorch-main | mega_pytorch/mega_pytorch.py |
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helper function
def exists(val):
return val is not None
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
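# a small sketch of the filtering behaviour (vocabulary size chosen for illustration):
# with thres = 0.9 roughly the top 10% of logits survive and everything else becomes -inf
# before sampling
def _top_k_demo():
    logits = torch.randn(1, 256)
    filtered = top_k(logits, thres = 0.9)
    assert torch.isfinite(filtered).sum().item() == 25  # int((1 - 0.9) * 256)
    return filtered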
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.net = net
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, temperature = 1., filter_thres = 0.9, **kwargs):
b, t, device = *start_tokens.shape, start_tokens.device
out = start_tokens
for _ in range(seq_len):
logits = self.net(out, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
out = out[:, t:]
return out
def forward(self, x, **kwargs):
x_inp, x_labels = x[:, :-1], x[:, 1:]
logits = self.net(x_inp, **kwargs)
return F.cross_entropy(rearrange(logits, 'b n c -> b c n'), x_labels)
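# a minimal end-to-end sketch (assumes the Mega class from mega_pytorch.py in this same
# package, as used in train.py above; sizes chosen small for illustration): wrap a tiny model,
# compute the autoregressive loss, then sample a short continuation
def _autoregressive_wrapper_demo():
    from mega_pytorch.mega_pytorch import Mega
    net = AutoregressiveWrapper(Mega(num_tokens = 256, dim = 64, depth = 1))
    tokens = torch.randint(0, 256, (1, 16))
    loss = net(tokens)                        # next-token cross entropy
    sample = net.generate(tokens[:, :4], 8)   # sample 8 new tokens from a 4 token prime
    assert sample.shape == (1, 8)
    return loss, sample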
| Mega-pytorch-main | mega_pytorch/autoregressive_wrapper.py |
from mega_pytorch.mega_pytorch import MegaLayer, Mega, MultiHeadedEMA
| Mega-pytorch-main | mega_pytorch/__init__.py |
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['deep_daze']
from version import __version__
setup(
name = 'deep-daze',
packages = find_packages(),
include_package_data = True,
entry_points={
'console_scripts': [
'imagine = deep_daze.cli:main',
],
},
version = __version__,
license='MIT',
description = 'Deep Daze',
author = 'Ryan Murdock, Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/deep-daze',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'implicit neural representations',
'text to image'
],
install_requires=[
'einops>=0.3',
'fire',
'ftfy',
'imageio>=2.9.0',
'siren-pytorch>=0.0.8',
'torch>=1.10',
'torch_optimizer',
'torchvision>=0.8.2',
'tqdm',
'regex'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| deep-daze-main | setup.py |
__version__ = '0.11.1'
| deep-daze-main | deep_daze/version.py |
from deep_daze.deep_daze import DeepDaze, Imagine
| deep-daze-main | deep_daze/__init__.py |
import sys
import fire
from deep_daze import Imagine
def train(
text=None,
img=None,
learning_rate=1e-5,
num_layers=16,
hidden_size=256,
batch_size=4,
gradient_accumulate_every=4,
epochs=20,
iterations=1050,
save_every=100,
image_width=512,
deeper=False,
overwrite=False,
save_progress=True,
seed=None,
open_folder=True,
save_date_time=False,
start_image_path=None,
start_image_train_iters=50,
theta_initial=None,
theta_hidden=None,
start_image_lr=3e-4,
lower_bound_cutout=0.1,
upper_bound_cutout=1.0,
saturate_bound=False,
create_story=False,
story_start_words=5,
story_words_per_epoch=5,
story_separator=None,
averaging_weight=0.3,
gauss_sampling=False,
gauss_mean=0.6,
gauss_std=0.2,
do_cutout=True,
center_bias=False,
center_focus=2,
jit=True,
save_gif=False,
save_video=False,
model_name="ViT-B/32",
optimizer="AdamP"
):
"""
:param text: (required) A phrase of fewer than 77 tokens which you would like to visualize.
:param img: The path to a jpg or png image which you would like to imagine. Can be combined with text.
:param learning_rate: The learning rate of the neural net.
:param hidden_size: The hidden layer size of the Siren net.
:param num_layers: The number of hidden layers to use in the Siren neural net.
:param batch_size: The number of generated images to pass into Siren before calculating loss. Decreasing this can lower memory and accuracy.
:param gradient_accumulate_every: Calculate a weighted loss of n samples for each iteration. Increasing this can help increase accuracy with lower batch sizes.
:param epochs: The number of epochs to run.
:param iterations: The number of times to calculate and backpropagate loss in a given epoch.
:param save_progress: Whether or not to save images generated before training Siren is complete.
:param save_every: Generate an image every time iterations is a multiple of this number.
:param open_folder: Whether or not to open a folder showing your generated images.
:param overwrite: Whether or not to overwrite existing generated images of the same name.
:param deeper: Uses a Siren neural net with 32 hidden layers.
:param image_width: The desired resolution of the image.
:param seed: A seed to be used for deterministic runs.
:param save_date_time: Save files with a timestamp prepended e.g. `%y%m%d-%H%M%S-my_phrase_here.png`
:param start_image_path: Path to the image you would like to prime the generator with initially
:param start_image_train_iters: Number of iterations for priming, defaults to 50
:param theta_initial: Hyperparameter describing the frequency of the color space. Only applies to the first layer of the network.
:param theta_hidden: Hyperparameter describing the frequency of the color space. Only applies to the hidden layers of the network.
:param start_image_lr: Learning rate for the start image training.
:param upper_bound_cutout: The upper bound for the cutouts used in generation.
:param lower_bound_cutout: The lower bound for the cutouts used in generation.
:param saturate_bound: If True, the LOWER_BOUND_CUTOUT is linearly increased to 0.75 during training.
:param create_story: Creates a story by optimizing each epoch on a new sliding-window of the input words. If this is enabled, much longer texts than 77 tokens can be used. Requires save_progress to visualize the transitions of the story.
:param story_start_words: Only used if create_story is True. How many words to optimize on for the first epoch.
:param story_words_per_epoch: Only used if create_story is True. How many words to add to the optimization goal per epoch after the first one.
:param story_separator: Only used if create_story is True. Defines a separator like '.' that splits the text into groups for each epoch. Separator needs to be in the text otherwise it will be ignored!
:param averaging_weight: How much to weigh the averaged features of the random cutouts over the individual random cutouts. Increasing this value leads to more details being represented at the cost of some global coherence and a parcellation into smaller scenes.
:param gauss_sampling: Whether to use sampling from a Gaussian distribution instead of a uniform distribution.
:param gauss_mean: The mean of the Gaussian sampling distribution.
:param gauss_std: The standard deviation of the Gaussian sampling distribution.
:param do_cutout: Whether to use random cutouts as an augmentation. This should stay enabled unless additional augmentations are added to the code.
:param center_bias: Whether to use a Gaussian distribution centered around the center of the image to sample the locations of random cutouts instead of a uniform distribution. Leads to the main generated objects being more focused in the center.
:param center_focus: How much to focus on the center if using center_bias. std = sampling_range / center_focus. High values lead to an accurate representation in the center but washed out colors and details towards the edges.
:param jit: Whether to use the jit-compiled CLIP model. The jit model is faster, but only compatible with torch version 1.7.1.
:param save_gif: Only used if save_progress is True. Saves a GIF animation of the generation procedure using the saved frames.
:param save_video: Only used if save_progress is True. Saves a MP4 animation of the generation procedure using the saved frames.
"""
# Don't instantiate imagine if the user just wants help.
if any("--help" in arg for arg in sys.argv):
print("Type `imagine --help` for usage info.")
sys.exit()
num_layers = 32 if deeper else num_layers
imagine = Imagine(
text=text,
img=img,
lr=learning_rate,
num_layers=num_layers,
batch_size=batch_size,
gradient_accumulate_every=gradient_accumulate_every,
epochs=epochs,
iterations=iterations,
image_width=image_width,
save_every=save_every,
save_progress=save_progress,
seed=seed,
open_folder=open_folder,
save_date_time=save_date_time,
start_image_path=start_image_path,
start_image_train_iters=start_image_train_iters,
theta_initial=theta_initial,
theta_hidden=theta_hidden,
start_image_lr=start_image_lr,
lower_bound_cutout=lower_bound_cutout,
upper_bound_cutout=upper_bound_cutout,
saturate_bound=saturate_bound,
create_story=create_story,
story_start_words=story_start_words,
story_words_per_epoch=story_words_per_epoch,
story_separator=story_separator,
averaging_weight=averaging_weight,
gauss_sampling=gauss_sampling,
gauss_mean=gauss_mean,
gauss_std=gauss_std,
do_cutout=do_cutout,
center_bias=center_bias,
center_focus=center_focus,
jit=jit,
hidden_size=hidden_size,
model_name=model_name,
optimizer=optimizer,
save_gif=save_gif,
save_video=save_video,
)
print('Starting up...')
if not overwrite and imagine.filename.exists():
answer = input('Imagined image already exists, do you want to overwrite? (y/n) ').lower()
if answer not in ('yes', 'y'):
sys.exit()
imagine()
def main():
fire.Fire(train)
| deep-daze-main | deep_daze/cli.py |
import os
import subprocess
import sys
import random
from datetime import datetime
from pathlib import Path
import torch
import torch.nn.functional as F
from siren_pytorch import SirenNet, SirenWrapper
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torch_optimizer import DiffGrad, AdamP
import numpy as np
from PIL import Image
from imageio import imread, mimsave
import torchvision.transforms as T
from tqdm import trange, tqdm
from .clip import load, tokenize
# Helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def interpolate(image, size):
return F.interpolate(image, (size, size), mode='bilinear', align_corners=False)
def rand_cutout(image, size, center_bias=False, center_focus=2):
width = image.shape[-1]
min_offset = 0
max_offset = width - size
if center_bias:
# sample around image center
center = max_offset / 2
std = center / center_focus
offset_x = int(random.gauss(mu=center, sigma=std))
offset_y = int(random.gauss(mu=center, sigma=std))
# resample uniformly if over boundaries
offset_x = random.randint(min_offset, max_offset) if (offset_x > max_offset or offset_x < min_offset) else offset_x
offset_y = random.randint(min_offset, max_offset) if (offset_y > max_offset or offset_y < min_offset) else offset_y
else:
offset_x = random.randint(min_offset, max_offset)
offset_y = random.randint(min_offset, max_offset)
cutout = image[:, :, offset_x:offset_x + size, offset_y:offset_y + size]
return cutout
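# a small sketch (sizes assumed for illustration): rand_cutout slices a random square
# patch out of an NCHW image tensor
def _rand_cutout_demo():
    img = torch.randn(1, 3, 512, 512)
    patch = rand_cutout(img, 224)
    assert patch.shape == (1, 3, 224, 224)
    return patch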
def create_clip_img_transform(image_width):
clip_mean = [0.48145466, 0.4578275, 0.40821073]
clip_std = [0.26862954, 0.26130258, 0.27577711]
transform = T.Compose([
#T.ToPILImage(),
T.Resize(image_width),
T.CenterCrop((image_width, image_width)),
T.ToTensor(),
T.Normalize(mean=clip_mean, std=clip_std)
])
return transform
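# a small sketch of the preprocessing pipeline (image size assumed for illustration):
# PIL image in, CLIP-normalized (3, width, width) tensor out
def _clip_transform_demo():
    transform = create_clip_img_transform(224)
    pil_img = Image.new('RGB', (640, 480))
    tensor = transform(pil_img)
    assert tensor.shape == (3, 224, 224)
    return tensor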
def open_folder(path):
if os.path.isfile(path):
path = os.path.dirname(path)
if not os.path.isdir(path):
return
cmd_list = None
if sys.platform == 'darwin':
cmd_list = ['open', '--', path]
elif sys.platform == 'linux2' or sys.platform == 'linux':
cmd_list = ['xdg-open', path]
elif sys.platform in ['win32', 'win64']:
cmd_list = ['explorer', path.replace('/', '\\')]
if cmd_list is None:
return
try:
subprocess.check_call(cmd_list)
except subprocess.CalledProcessError:
pass
except OSError:
pass
def norm_siren_output(img):
return ((img + 1) * 0.5).clamp(0.0, 1.0)
def create_text_path(context_length, text=None, img=None, encoding=None, separator=None):
if text is not None:
if separator is not None and separator in text:
#Reduces filename to first epoch text
text = text[:text.index(separator)]
input_name = text.replace(" ", "_")[:context_length]
elif img is not None:
if isinstance(img, str):
input_name = "".join(img.replace(" ", "_").split(".")[:-1])
else:
input_name = "PIL_img"
else:
input_name = "your_encoding"
return input_name
class DeepDaze(nn.Module):
def __init__(
self,
clip_perceptor,
clip_norm,
input_res,
total_batches,
batch_size,
num_layers=8,
image_width=512,
loss_coef=100,
theta_initial=None,
theta_hidden=None,
lower_bound_cutout=0.1, # should be smaller than 0.8
upper_bound_cutout=1.0,
saturate_bound=False,
gauss_sampling=False,
gauss_mean=0.6,
gauss_std=0.2,
do_cutout=True,
center_bias=False,
center_focus=2,
hidden_size=256,
averaging_weight=0.3,
):
super().__init__()
# load clip
self.perceptor = clip_perceptor
self.input_resolution = input_res
self.normalize_image = clip_norm
self.loss_coef = loss_coef
self.image_width = image_width
self.batch_size = batch_size
self.total_batches = total_batches
self.num_batches_processed = 0
w0 = default(theta_hidden, 30.)
w0_initial = default(theta_initial, 30.)
siren = SirenNet(
dim_in=2,
dim_hidden=hidden_size,
num_layers=num_layers,
dim_out=3,
use_bias=True,
w0=w0,
w0_initial=w0_initial
)
self.model = SirenWrapper(
siren,
image_width=image_width,
image_height=image_width
)
self.saturate_bound = saturate_bound
self.saturate_limit = 0.75 # cutouts above this value lead to destabilization
self.lower_bound_cutout = lower_bound_cutout
self.upper_bound_cutout = upper_bound_cutout
self.gauss_sampling = gauss_sampling
self.gauss_mean = gauss_mean
self.gauss_std = gauss_std
self.do_cutout = do_cutout
self.center_bias = center_bias
self.center_focus = center_focus
self.averaging_weight = averaging_weight
def sample_sizes(self, lower, upper, width, gauss_mean):
if self.gauss_sampling:
gauss_samples = torch.zeros(self.batch_size).normal_(mean=gauss_mean, std=self.gauss_std)
outside_bounds_mask = (gauss_samples > upper) | (gauss_samples < lower) # resample anything outside [lower, upper]
gauss_samples[outside_bounds_mask] = torch.zeros((len(gauss_samples[outside_bounds_mask]),)).uniform_(lower, upper)
sizes = (gauss_samples * width).int()
else:
lower *= width
upper *= width
sizes = torch.randint(int(lower), int(upper), (self.batch_size,))
return sizes
def forward(self, text_embed, return_loss=True, dry_run=False):
out = self.model()
out = norm_siren_output(out)
if not return_loss:
return out
# determine upper and lower sampling bound
width = out.shape[-1]
lower_bound = self.lower_bound_cutout
if self.saturate_bound:
progress_fraction = self.num_batches_processed / self.total_batches
lower_bound += (self.saturate_limit - self.lower_bound_cutout) * progress_fraction
# sample cutout sizes between lower and upper bound
sizes = self.sample_sizes(lower_bound, self.upper_bound_cutout, width, self.gauss_mean)
# create normalized random cutouts
if self.do_cutout:
image_pieces = [rand_cutout(out, size, center_bias=self.center_bias, center_focus=self.center_focus) for size in sizes]
image_pieces = [interpolate(piece, self.input_resolution) for piece in image_pieces]
else:
image_pieces = [interpolate(out.clone(), self.input_resolution) for _ in sizes]
# normalize
image_pieces = torch.cat([self.normalize_image(piece) for piece in image_pieces])
# calc image embedding
with autocast(enabled=False):
image_embed = self.perceptor.encode_image(image_pieces)
# calc loss
# loss over averaged features of cutouts
avg_image_embed = image_embed.mean(dim=0).unsqueeze(0)
averaged_loss = -self.loss_coef * torch.cosine_similarity(text_embed, avg_image_embed, dim=-1).mean()
# loss over all cutouts
general_loss = -self.loss_coef * torch.cosine_similarity(text_embed, image_embed, dim=-1).mean()
# merge losses
loss = averaged_loss * (self.averaging_weight) + general_loss * (1 - self.averaging_weight)
# count batches
if not dry_run:
self.num_batches_processed += self.batch_size
return out, loss
class Imagine(nn.Module):
def __init__(
self,
*,
text=None,
img=None,
clip_encoding=None,
lr=1e-5,
batch_size=4,
gradient_accumulate_every=4,
save_every=100,
image_width=512,
num_layers=16,
epochs=20,
iterations=1050,
save_progress=True,
seed=None,
open_folder=True,
save_date_time=False,
start_image_path=None,
start_image_train_iters=10,
start_image_lr=3e-4,
theta_initial=None,
theta_hidden=None,
model_name="ViT-B/32",
lower_bound_cutout=0.1, # should be smaller than 0.8
upper_bound_cutout=1.0,
saturate_bound=False,
averaging_weight=0.3,
create_story=False,
story_start_words=5,
story_words_per_epoch=5,
story_separator=None,
gauss_sampling=False,
gauss_mean=0.6,
gauss_std=0.2,
do_cutout=True,
center_bias=False,
center_focus=2,
optimizer="AdamP",
jit=True,
hidden_size=256,
save_gif=False,
save_video=False,
):
super().__init__()
if exists(seed):
tqdm.write(f'setting seed: {seed}')
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
# fields for story creation:
self.create_story = create_story
self.words = None
self.separator = str(story_separator) if story_separator is not None else None
if self.separator is not None and text is not None:
#exit if text is just the separator
if str(text).replace(' ','').replace(self.separator,'') == '':
print('Exiting because the text only consists of the separator! Needs words or phrases that are separated by the separator.')
exit()
#adds a space to each separator and removes double spaces that might be generated
text = text.replace(self.separator, self.separator + ' ').replace('  ', ' ').strip()
self.all_words = text.split(" ") if text is not None else None
self.num_start_words = story_start_words
self.words_per_epoch = story_words_per_epoch
if create_story:
assert text is not None, "We need text input to create a story..."
# overwrite epochs to match story length
num_words = len(self.all_words)
self.epochs = 1 + (num_words - self.num_start_words) / self.words_per_epoch
# add one epoch if not divisible
self.epochs = int(self.epochs) if int(self.epochs) == self.epochs else int(self.epochs) + 1
if self.separator is not None:
if self.separator not in text:
print("Separator '"+self.separator+"' will be ignored since not in text!")
self.separator = None
else:
self.epochs = len(list(filter(None,text.split(self.separator))))
print("Running for", self.epochs, "epochs" + (" (split with '"+self.separator+"' as the separator)" if self.separator is not None else ""))
else:
self.epochs = epochs
# jit models only compatible with version 1.7.1
if "1.7.1" not in torch.__version__:
if jit == True:
print("Setting jit to False because torch version is not 1.7.1.")
jit = False
# Load CLIP
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
clip_perceptor, norm = load(model_name, jit=jit, device=self.device)
self.perceptor = clip_perceptor.eval()
for param in self.perceptor.parameters():
param.requires_grad = False
if jit == False:
input_res = clip_perceptor.visual.input_resolution
else:
input_res = clip_perceptor.input_resolution.item()
self.clip_transform = create_clip_img_transform(input_res)
self.iterations = iterations
self.image_width = image_width
total_batches = self.epochs * self.iterations * batch_size * gradient_accumulate_every
model = DeepDaze(
self.perceptor,
norm,
input_res,
total_batches,
batch_size=batch_size,
image_width=image_width,
num_layers=num_layers,
theta_initial=theta_initial,
theta_hidden=theta_hidden,
lower_bound_cutout=lower_bound_cutout,
upper_bound_cutout=upper_bound_cutout,
saturate_bound=saturate_bound,
gauss_sampling=gauss_sampling,
gauss_mean=gauss_mean,
gauss_std=gauss_std,
do_cutout=do_cutout,
center_bias=center_bias,
center_focus=center_focus,
hidden_size=hidden_size,
averaging_weight=averaging_weight,
).to(self.device)
self.model = model
self.scaler = GradScaler()
siren_params = model.model.parameters()
if optimizer == "AdamP":
self.optimizer = AdamP(siren_params, lr)
elif optimizer == "Adam":
self.optimizer = torch.optim.Adam(siren_params, lr)
elif optimizer == "DiffGrad":
self.optimizer = DiffGrad(siren_params, lr)
self.gradient_accumulate_every = gradient_accumulate_every
self.save_every = save_every
self.save_date_time = save_date_time
self.open_folder = open_folder
self.save_progress = save_progress
self.text = text
self.image = img
self.textpath = create_text_path(self.perceptor.context_length, text=text, img=img, encoding=clip_encoding, separator=story_separator)
self.filename = self.image_output_path()
# create coding to optimize for
self.clip_encoding = self.create_clip_encoding(text=text, img=img, encoding=clip_encoding)
self.start_image = None
self.start_image_train_iters = start_image_train_iters
self.start_image_lr = start_image_lr
if exists(start_image_path):
file = Path(start_image_path)
assert file.exists(), f'file does not exist at given starting image path {start_image_path}'
image = Image.open(str(file))
start_img_transform = T.Compose([T.Resize(image_width),
T.CenterCrop((image_width, image_width)),
T.ToTensor()])
image_tensor = start_img_transform(image).unsqueeze(0).to(self.device)
self.start_image = image_tensor
self.save_gif = save_gif
self.save_video = save_video
def create_clip_encoding(self, text=None, img=None, encoding=None):
self.text = text
self.img = img
if encoding is not None:
encoding = encoding.to(self.device)
elif self.create_story:
encoding = self.update_story_encoding(epoch=0, iteration=1)
elif text is not None and img is not None:
encoding = (self.create_text_encoding(text) + self.create_img_encoding(img)) / 2
elif text is not None:
encoding = self.create_text_encoding(text)
elif img is not None:
encoding = self.create_img_encoding(img)
return encoding
def create_text_encoding(self, text):
tokenized_text = tokenize(text).to(self.device)
with torch.no_grad():
text_encoding = self.perceptor.encode_text(tokenized_text).detach()
return text_encoding
def create_img_encoding(self, img):
if isinstance(img, str):
img = Image.open(img)
normed_img = self.clip_transform(img).unsqueeze(0).to(self.device)
with torch.no_grad():
img_encoding = self.perceptor.encode_image(normed_img).detach()
return img_encoding
def set_clip_encoding(self, text=None, img=None, encoding=None):
encoding = self.create_clip_encoding(text=text, img=img, encoding=encoding)
self.clip_encoding = encoding.to(self.device)
def index_of_first_separator(self) -> int:
for c, word in enumerate(self.all_words):
if self.separator in str(word):
return c +1
def update_story_encoding(self, epoch, iteration):
if self.separator is not None:
self.words = " ".join(self.all_words[:self.index_of_first_separator()])
#removes separator from epoch-text
self.words = self.words.replace(self.separator,'')
self.all_words = self.all_words[self.index_of_first_separator():]
else:
if self.words is None:
self.words = " ".join(self.all_words[:self.num_start_words])
self.all_words = self.all_words[self.num_start_words:]
else:
# add words_per_epoch new words
count = 0
while count < self.words_per_epoch and len(self.all_words) > 0:
new_word = self.all_words[0]
self.words = " ".join(self.words.split(" ") + [new_word])
self.all_words = self.all_words[1:]
count += 1
# remove words until it fits in context length
while len(self.words) > self.perceptor.context_length:
# remove first word
self.words = " ".join(self.words.split(" ")[1:])
# get new encoding
print("Now thinking of: ", '"', self.words, '"')
sequence_number = self.get_img_sequence_number(epoch, iteration)
# save new words to disc
with open("story_transitions.txt", "a") as f:
f.write(f"{epoch}, {sequence_number}, {self.words}\n")
encoding = self.create_text_encoding(self.words)
return encoding
def image_output_path(self, sequence_number=None):
"""
Returns underscore separated Path.
A current timestamp is prepended if `self.save_date_time` is set.
Sequence number left padded with 6 zeroes is appended if `save_every` is set.
:rtype: Path
"""
output_path = self.textpath
if sequence_number:
sequence_number_left_padded = str(sequence_number).zfill(6)
output_path = f"{output_path}.{sequence_number_left_padded}"
if self.save_date_time:
current_time = datetime.now().strftime("%y%m%d-%H%M%S_%f")
output_path = f"{current_time}_{output_path}"
return Path(f"{output_path}.jpg")
def train_step(self, epoch, iteration):
total_loss = 0
for _ in range(self.gradient_accumulate_every):
with autocast(enabled=True):
out, loss = self.model(self.clip_encoding)
loss = loss / self.gradient_accumulate_every
total_loss += loss
self.scaler.scale(loss).backward()
out = out.cpu().float().clamp(0., 1.)
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
if (iteration % self.save_every == 0) and self.save_progress:
self.save_image(epoch, iteration, img=out)
return out, total_loss
def get_img_sequence_number(self, epoch, iteration):
current_total_iterations = epoch * self.iterations + iteration
sequence_number = current_total_iterations // self.save_every
return sequence_number
@torch.no_grad()
def save_image(self, epoch, iteration, img=None):
sequence_number = self.get_img_sequence_number(epoch, iteration)
if img is None:
img = self.model(self.clip_encoding, return_loss=False).cpu().float().clamp(0., 1.)
self.filename = self.image_output_path(sequence_number=sequence_number)
pil_img = T.ToPILImage()(img.squeeze())
pil_img.save(self.filename, quality=95, subsampling=0)
pil_img.save(f"{self.textpath}.jpg", quality=95, subsampling=0)
tqdm.write(f'image updated at "./{str(self.filename)}"')
def generate_gif(self):
images = []
for file_name in sorted(os.listdir('./')):
if file_name.startswith(self.textpath) and file_name != f'{self.textpath}.jpg':
images.append(imread(os.path.join('./', file_name)))
if self.save_video:
mimsave(f'{self.textpath}.mp4', images)
print(f'Saved generation animation at ./{self.textpath}.mp4')
if self.save_gif:
mimsave(f'{self.textpath}.gif', images)
print(f'Saved generation animation at ./{self.textpath}.gif')
def forward(self):
if exists(self.start_image):
tqdm.write('Preparing with initial image...')
optim = DiffGrad(self.model.model.parameters(), lr = self.start_image_lr)
pbar = trange(self.start_image_train_iters, desc='iteration')
try:
for _ in pbar:
loss = self.model.model(self.start_image)
loss.backward()
pbar.set_description(f'loss: {loss.item():.2f}')
optim.step()
optim.zero_grad()
except KeyboardInterrupt:
print('interrupted by keyboard, gracefully exiting')
return exit()
del self.start_image
del optim
tqdm.write(f'Imagining "{self.textpath}" from the depths of my weights...')
with torch.no_grad():
self.model(self.clip_encoding, dry_run=True) # do one warmup step due to potential issue with CLIP and CUDA
if self.open_folder:
open_folder('./')
self.open_folder = False
try:
for epoch in trange(self.epochs, desc='epochs'):
pbar = trange(self.iterations, desc='iteration')
for i in pbar:
_, loss = self.train_step(epoch, i)
pbar.set_description(f'loss: {loss.item():.2f}')
# Update clip_encoding per epoch if we are creating a story
if self.create_story:
self.clip_encoding = self.update_story_encoding(epoch, i)
except KeyboardInterrupt:
print('interrupted by keyboard, gracefully exiting')
return
self.save_image(epoch, i) # one final save at end
if (self.save_gif or self.save_video) and self.save_progress:
self.generate_gif()
| deep-daze-main | deep_daze/deep_daze.py |
from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from pathlib import Path
import hashlib
import os
import urllib
import warnings
from typing import Union, List
from torchvision.transforms import Compose, Normalize
from tqdm import tqdm
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(
total=int(source.info().get("Content-Length")),
unit='iB',
unit_scale=True,
desc=f"Downloading {filename}",
) as loop:
while True:
buffer = source.read(524288)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform():
return Compose([
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform()
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform()
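# a minimal usage sketch (assumes network access; the checkpoint is cached under
# ~/.cache/clip on first use)
def _load_demo():
    model, normalize = load("ViT-B/32", device="cpu", jit=False)
    return model.visual.input_resolution, model.context_length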
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
result[i, :len(tokens)] = torch.tensor(tokens)
return result
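# a minimal sketch of tokenization (relies on the BPE tokenizer `_tokenizer` constructed
# further down in this file): every prompt becomes a fixed length row of 77 token ids,
# padded with zeros
def _tokenize_demo():
    tokens = tokenize(["a diagram", "a dog", "a cat"])
    assert tokens.shape == (3, 77)
    return tokens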
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(self,
embed_dim: int,
# vision
image_resolution: int,
vision_layers: Union[Tuple[int, int, int, int], int],
vision_width: int,
vision_patch_size: int,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int
):
super().__init__()
self.context_length = context_length
if isinstance(vision_layers, (tuple, list)):
vision_heads = vision_width * 32 // 64
self.visual = ModifiedResNet(
layers=vision_layers,
output_dim=embed_dim,
heads=vision_heads,
input_resolution=image_resolution,
width=vision_width
)
else:
vision_heads = vision_width // 64
self.visual = VisualTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask()
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if isinstance(self.visual, ModifiedResNet):
if self.visual.attnpool is not None:
std = self.visual.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)
for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, image):
return self.visual(image.type(self.dtype))
def encode_text(self, text):
x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
image_features = self.encode_image(image)
text_features = self.encode_text(text)
# normalized features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
# shape = [global_batch_size, global_batch_size]
return logits_per_image, logits_per_text
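# --- Illustrative sketch (added here for clarity; not part of the original CLIP source, and
# never called). With random stand-in tensors it shows that the logits returned by
# CLIP.forward are just a learned-scale cosine similarity between L2-normalized features.
def _cosine_logits_demo(batch_size=4, embed_dim=512):
    image_features = torch.randn(batch_size, embed_dim)
    text_features = torch.randn(batch_size, embed_dim)
    # normalize each feature vector to unit length, as in CLIP.forward above
    image_features = image_features / image_features.norm(dim=-1, keepdim=True)
    text_features = text_features / text_features.norm(dim=-1, keepdim=True)
    logit_scale = torch.ones([]).exp()
    # [batch_size, batch_size] matrix of pairwise image-text similarities
    logits_per_image = logit_scale * image_features @ text_features.t()
    return logits_per_image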
def convert_weights(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
model = CLIP(
embed_dim,
image_resolution, vision_layers, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
)
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in state_dict:
del state_dict[key]
convert_weights(model)
model.load_state_dict(state_dict)
return model.eval()
import html
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    This also avoids mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
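# --- Illustrative sketch (added; not part of the original tokenizer source, never called).
# bytes_to_unicode builds a 1-to-1, reversible map from all 256 byte values to printable
# unicode characters, so arbitrary UTF-8 bytes can flow through BPE as ordinary strings.
def _byte_unicode_roundtrip_demo(text="hello"):
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}
    encoded = ''.join(byte_encoder[b] for b in text.encode('utf-8'))
    decoded = bytearray([byte_decoder[c] for c in encoded]).decode('utf-8')
    assert decoded == text
    return encoded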
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
_tokenizer = SimpleTokenizer()
| deep-daze-main | deep_daze/clip.py |
from setuptools import setup, find_packages
setup(
name = 'reformer_pytorch',
packages = find_packages(exclude=['examples', 'pretraining']),
version = '1.4.4',
license='MIT',
description = 'Reformer, the Efficient Transformer, Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/reformer-pytorch',
keywords = ['transformers', 'attention', 'artificial intelligence'],
install_requires=[
'axial-positional-embedding>=0.1.0',
'einops',
'local-attention',
'product-key-memory',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| reformer-pytorch-master | setup.py |
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.autopadder import Autopadder
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
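# --- Illustrative sketch (added; not part of the original file, never called). Both filters
# above return a tensor shaped like `logits` with discarded entries set to -inf, so the
# softmax in generate() assigns them zero probability. The toy logits below are arbitrary.
def _logit_filtering_demo():
    logits = torch.arange(20.)[None, :] # (1, 20) toy logits
    k = int((1 - 0.9) * logits.shape[-1]) # same k that top_k computes internally
    filtered = top_k(logits, thres = 0.9)
    assert torch.isfinite(filtered).sum() == k # only the k largest logits survive
    probs = F.softmax(filtered, dim = -1) # what generate() does next
    return probs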
class TrainingWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
assert isinstance(net, ReformerLM), 'generative trainer wrapper can only accept ReformerLM class'
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = Autopadder(net)
self.max_seq_len = net.max_seq_len
@torch.no_grad()
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
was_training = self.net.training
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
self.net.eval()
out = start_tokens
input_mask = kwargs.pop('input_mask', None)
if input_mask is None:
input_mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
input_mask = input_mask[:, -self.max_seq_len:]
logits = self.net(x, input_mask=input_mask, **kwargs)[:, -1, :]
filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
input_mask = F.pad(input_mask, (0, 1), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
self.net.train(was_training)
return out
def forward(self, x, return_loss = False, **kwargs):
pad = partial(pad_sequence, batch_first = True, padding_value = self.pad_value)
if not return_loss:
if not isinstance(x, torch.Tensor):
x = pad(x)
return self.net(x, **kwargs)
if isinstance(x, torch.Tensor):
xi = x[:, :-1]
xo = x[:, 1:]
else:
xi = pad(list(map(lambda t: t[:-1], x)))
xo = pad(list(map(lambda t: t[1:], x)))
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
| reformer-pytorch-master | reformer_pytorch/generative_tools.py |
import math
import torch
from torch import nn
import torch.nn.functional as F
from reformer_pytorch.reformer_pytorch import Reformer, ReformerLM, LSHSelfAttention
def pad_to_multiple(tensor, seqlen, multiple, dim=-1):
m = seqlen / multiple
if m.is_integer():
return tensor
remainder = math.ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value=0)
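# --- Illustrative sketch (added; not part of the original file, never called). pad_to_multiple
# right-pads the chosen dimension with zeros until its length is a multiple of `multiple`;
# lengths that already fit are returned untouched. The shapes below are arbitrary.
def _pad_to_multiple_demo():
    x = torch.randn(2, 100, 8) # (batch, seq, dim)
    padded = pad_to_multiple(x, x.shape[1], multiple = 128, dim = -2)
    assert padded.shape == (2, 128, 8) # 100 -> 128 via zero padding on the right
    y = torch.randn(2, 128, 8)
    assert pad_to_multiple(y, y.shape[1], multiple = 128, dim = -2).shape == (2, 128, 8)
    return padded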
class Autopadder(nn.Module):
def __init__(self, net):
super().__init__()
assert isinstance(net, (LSHSelfAttention, Reformer, ReformerLM)), 'only modules LSHSelfAttention, Reformer, ReformerLM accepted'
self.net = net
reformer = net.reformer if isinstance(net, ReformerLM) else net
self.pad_dim = -1 if isinstance(net, ReformerLM) else -2
self.bucket_size = reformer.bucket_size
self.num_mem_kv = reformer.num_mem_kv
self.full_attn_thres = reformer.full_attn_thres
def forward(self, x, **kwargs):
b, t, m, device = *x.shape[:2], self.num_mem_kv, x.device
keys = kwargs.get('keys')
input_mask = kwargs.get('input_mask')
input_attn_mask = kwargs.get('input_attn_mask')
k_len = 0 if keys is None else keys.shape[1]
seqlen = t + m + k_len
if seqlen > self.full_attn_thres:
if input_mask is None:
input_mask = torch.full((b, t), True, device=x.device, dtype=torch.bool)
x = pad_to_multiple(x, seqlen, self.bucket_size * 2, dim=self.pad_dim)
if input_mask is not None:
new_mask = F.pad(input_mask, (0, x.shape[1] - input_mask.shape[1]), value=False)
kwargs.update(input_mask=new_mask)
if input_attn_mask is not None:
offset = x.shape[1] - input_attn_mask.shape[1]
new_mask = F.pad(input_attn_mask, (0, offset, 0, offset), value=False)
kwargs.update(input_attn_mask=new_mask)
out = self.net(x, **kwargs)
return out[:, 0:t]
| reformer-pytorch-master | reformer_pytorch/autopadder.py |
import re
from torch import nn
from reformer_pytorch.reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
ENC_PREFIX = 'enc_'
DEC_PREFIX = 'dec_'
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return bool(re.match(f'^{prefix}', str))
def group_by_key_prefix(prefix, d):
return group_dict_by_key(lambda x: string_begins_with(prefix, x), d)
def group_by_key_prefix_and_remove_prefix(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(lambda x: string_begins_with(prefix, x), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
def extract_enc_dec_kwargs(kwargs):
enc_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(ENC_PREFIX, kwargs)
dec_kwargs, kwargs = group_by_key_prefix_and_remove_prefix(DEC_PREFIX, kwargs)
return enc_kwargs, dec_kwargs, kwargs
def extract_and_set_enc_dec_kwargs(kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_enc_dec_kwargs(kwargs)
if 'input_mask' in enc_kwargs:
dec_kwargs.setdefault('context_mask', enc_kwargs['input_mask'])
return enc_kwargs, dec_kwargs, kwargs
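# --- Illustrative sketch (added; not part of the original file, never called). Keyword
# arguments prefixed with 'enc_' / 'dec_' are routed to the encoder / decoder respectively;
# anything unprefixed stays in the shared kwargs. The keys below are made up for the demo.
def _kwarg_routing_demo():
    enc_kwargs, dec_kwargs, rest = extract_enc_dec_kwargs({
        'enc_num_tokens': 20000,
        'dec_num_tokens': 20000,
        'enc_input_mask': None,
        'return_loss': True
    })
    assert enc_kwargs == {'num_tokens': 20000, 'input_mask': None}
    assert dec_kwargs == {'num_tokens': 20000}
    assert rest == {'return_loss': True}
    return enc_kwargs, dec_kwargs, rest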
class ReformerEncDec(nn.Module):
def __init__(self, dim, ignore_index = 0, pad_value = 0, **kwargs):
super().__init__()
enc_kwargs, dec_kwargs, _ = extract_enc_dec_kwargs(kwargs)
assert 'return_embedding' not in enc_kwargs, 'you cannot manually set the return embeddings flag for the encoder'
assert 'dim' not in dec_kwargs and 'dim' not in enc_kwargs, 'you must set the dim for both encoder and decoder'
enc_kwargs['dim'] = dec_kwargs['dim'] = dim
enc_kwargs['return_embeddings'] = True
dec_kwargs['causal'] = True
enc_kwargs.setdefault('bucket_size', 64)
dec_kwargs.setdefault('bucket_size', enc_kwargs['bucket_size'] * 2)
enc = ReformerLM(**enc_kwargs)
dec = ReformerLM(**dec_kwargs)
self.enc = TrainingWrapper(enc, ignore_index = ignore_index, pad_value = pad_value)
self.dec = TrainingWrapper(dec, ignore_index = ignore_index, pad_value = pad_value)
def generate(self, seq_in, seq_out_start, seq_len, **kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
enc_keys = self.enc(seq_in, **enc_kwargs)
return self.dec.generate(seq_out_start, seq_len, keys = enc_keys, **{**dec_kwargs, **kwargs})
def forward(self, seq_in, seq_out, return_loss = False, **kwargs):
enc_kwargs, dec_kwargs, kwargs = extract_and_set_enc_dec_kwargs(kwargs)
enc_keys = self.enc(seq_in, **enc_kwargs)
return self.dec(seq_out, return_loss = return_loss, keys = enc_keys, **dec_kwargs)
| reformer-pytorch-master | reformer_pytorch/reformer_enc_dec.py |
import torch
import torch.nn as nn
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g, depth=None, send_signal = False):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
self.depth = depth
self.send_signal = send_signal
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = False
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
if self.send_signal:
f_args['_reverse'] = g_args['_reverse'] = True
f_args['_depth'] = g_args['_depth'] = self.depth
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
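# --- Illustrative sketch (added; not part of the original file, never called). The additive
# coupling above is exactly invertible, which is what lets backward_pass recompute activations
# instead of storing them. The tiny Linear f / g below are stand-ins for attention / feedforward.
def _coupling_invertibility_demo():
    torch.manual_seed(0)
    f, g = nn.Linear(16, 16), nn.Linear(16, 16)
    x1, x2 = torch.randn(2, 8, 16), torch.randn(2, 8, 16)
    with torch.no_grad():
        y1 = x1 + f(x2) # forward coupling
        y2 = x2 + g(y1)
        x2_rec = y2 - g(y1) # inverse: recover x2 first, then x1
        x1_rec = y1 - f(x2_rec)
    assert torch.allclose(x1_rec, x1) and torch.allclose(x2_rec, x2)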
class IrreversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = f
self.g = g
def forward(self, x, f_args, g_args):
x1, x2 = torch.chunk(x, 2, dim=2)
y1 = x1 + self.f(x2, **f_args)
y2 = x2 + self.g(y1, **g_args)
return torch.cat([y1, y2], dim=2)
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, kwargs):
ctx.kwargs = kwargs
for block in blocks:
x = block(x, **kwargs)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
kwargs = ctx.kwargs
for block in ctx.blocks[::-1]:
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class ReversibleSequence(nn.Module):
def __init__(self, blocks, layer_dropout = 0., reverse_thres = 0, send_signal = False):
super().__init__()
self.layer_dropout = layer_dropout
self.reverse_thres = reverse_thres
self.blocks = nn.ModuleList([ReversibleBlock(f, g, depth, send_signal) for depth, (f, g) in enumerate(blocks)])
self.irrev_blocks = nn.ModuleList([IrreversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, arg_route = (True, False), **kwargs):
reverse = x.shape[1] > self.reverse_thres
blocks = self.blocks if reverse else self.irrev_blocks
if self.training and self.layer_dropout > 0:
            candidate_blocks = blocks
            to_drop = torch.empty(len(candidate_blocks)).uniform_(0, 1) < self.layer_dropout
            blocks = [block for block, drop in zip(candidate_blocks, to_drop) if not drop]
            blocks = candidate_blocks[:1] if len(blocks) == 0 else blocks
f_args, g_args = map(lambda route: kwargs if route else {}, arg_route)
block_kwargs = {'f_args': f_args, 'g_args': g_args}
if not reverse:
for block in blocks:
x = block(x, **block_kwargs)
return x
return _ReversibleFunction.apply(x, blocks, block_kwargs)
| reformer-pytorch-master | reformer_pytorch/reversible.py |
from torch import nn
from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention
from collections import defaultdict
class Recorder(nn.Module):
def __init__(self, net):
super().__init__()
self.iter = 0
self.recordings = defaultdict(list)
self.net = net
self.on = True
self.ejected = False
def eject(self):
self.ejected = True
self.clear()
self.unwire()
return self.net
def wire(self):
for module in self.net.modules():
if isinstance(module, LSHAttention):
module._return_attn = True
if isinstance(module, LSHSelfAttention):
module.callback = self.record
def unwire(self):
for module in self.net.modules():
if isinstance(module, LSHAttention):
module._return_attn = False
if isinstance(module, LSHSelfAttention):
module.callback = None
def turn_on(self):
self.on = True
def turn_off(self):
self.on = False
def clear(self):
del self.recordings
self.recordings = defaultdict(list)
self.iter = 0
def record(self, attn, buckets):
if not self.on: return
data = {'attn': attn.detach().cpu(), 'buckets': buckets.detach().cpu()}
self.recordings[self.iter].append(data)
def forward(self, x, **kwargs):
assert not self.ejected, 'Recorder has already been ejected and disposed'
if self.on:
self.wire()
out = self.net(x, **kwargs)
self.iter += 1
self.unwire()
return out
| reformer-pytorch-master | reformer_pytorch/recorder.py |
from reformer_pytorch.reformer_pytorch import LSHAttention, LSHSelfAttention, Reformer, ReformerLM
from reformer_pytorch.reformer_enc_dec import ReformerEncDec
from reformer_pytorch.recorder import Recorder
from reformer_pytorch.autopadder import Autopadder
| reformer-pytorch-master | reformer_pytorch/__init__.py |
import math
import torch
import torch.nn as nn
from torch.nn import Identity
import torch.nn.functional as F
from torch.autograd import Function
from functools import partial, reduce, wraps
from itertools import chain
from operator import mul
from local_attention import LocalAttention
from axial_positional_embedding import AxialPositionalEmbedding
from product_key_memory import PKM
from reformer_pytorch.reversible import ReversibleSequence
from einops import rearrange, repeat
# constants
TOKEN_SELF_ATTN_VALUE = -5e4 # carefully set for half precision to work
# helper fns
def exists(val):
return val is not None
def sort_key_val(t1, t2, dim=-1):
values, indices = t1.sort(dim=dim)
t2 = t2.expand_as(t1)
return values, t2.gather(dim, indices)
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
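# --- Illustrative sketch (added; not part of the original file, never called). These two
# helpers do the sort / gather bookkeeping used throughout LSH attention; the numbers below
# are toy values chosen only to make the behaviour visible.
def _sort_and_gather_demo():
    t1 = torch.tensor([[3., 1., 2.]])
    t2 = torch.tensor([[0, 1, 2]])
    values, gathered = sort_key_val(t1, t2) # sort t1, reorder t2 the same way
    assert values.tolist() == [[1., 2., 3.]] and gathered.tolist() == [[1, 2, 0]]
    vecs = torch.arange(6.).reshape(1, 3, 2) # (batch, seq, dim)
    picked = batched_index_select(vecs, torch.tensor([[2, 0]]))
    assert picked.tolist() == [[[4., 5.], [0., 1.]]]
    return picked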
def process_inputs_chunk(fn, chunks=1, dim=0):
def inner_fn(*args, **kwargs):
keys, values, len_args = kwargs.keys(), kwargs.values(), len(args)
chunked_args = list(zip(*map(lambda x: x.chunk(chunks, dim=dim), list(args) + list(values))))
all_args = map(lambda x: (x[:len_args], dict(zip(keys, x[len_args:]))), chunked_args)
outputs = [fn(*c_args, **c_kwargs) for c_args, c_kwargs in all_args]
return tuple(map(lambda x: torch.cat(x, dim=dim), zip(*outputs)))
return inner_fn
def chunked_sum(tensor, chunks=1):
*orig_size, last_dim = tensor.shape
tensor = tensor.reshape(-1, last_dim)
summed_tensors = [c.sum(dim=-1) for c in tensor.chunk(chunks, dim=0)]
return torch.cat(summed_tensors, dim=0).reshape(orig_size)
def default(val, default_val):
return default_val if val is None else val
def cast_tuple(x):
return x if isinstance(x, tuple) else (x,)
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def cache_fn(f):
cache = None
@wraps(f)
def cached_fn(*args, **kwargs):
nonlocal cache
if cache is not None:
return cache
cache = f(*args, **kwargs)
return cache
return cached_fn
def cache_method_decorator(cache_attr, cache_namespace, reexecute = False):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, key_namespace=None, fetch=False, set_cache=True, **kwargs):
namespace_str = str(default(key_namespace, ''))
_cache = getattr(self, cache_attr)
_keyname = f'{cache_namespace}:{namespace_str}'
if fetch:
val = _cache[_keyname]
if reexecute:
fn(self, *args, **kwargs)
else:
val = fn(self, *args, **kwargs)
if set_cache:
setattr(self, cache_attr, {**_cache, **{_keyname: val}})
return val
return wrapper
return inner_fn
def expand_dim(dim, k, t):
t = t.unsqueeze(dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
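# --- Illustrative sketch (added; not part of the original file, never called). Shape utilities
# used when folding heads into the batch dimension and when splitting local vs LSH heads.
def _shape_helper_demo():
    merged = merge_dims(0, 1, torch.randn(2, 8, 4, 16)) # fold dims 0..1 together
    assert merged.shape == (16, 4, 16)
    left, right = split_at_index(1, 3, torch.randn(2, 10, 16))
    assert left.shape == (2, 3, 16) and right.shape == (2, 7, 16)
    expanded = expand_dim(1, 4, torch.randn(2, 10)) # insert and expand a new dim of size 4
    assert expanded.shape == (2, 4, 10)
    return merged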
# helper classes
class Always(nn.Module):
def __init__(self, val):
super().__init__()
self.val = val
def forward(self, *args, **kwargs):
return self.val
class MatrixMultiply(nn.Module):
def __init__(self, tensor, transpose = False, normalize = False):
super().__init__()
self.tensor = tensor
self.transpose = transpose
self.normalize = normalize
def forward(self, x):
tensor = self.tensor
if self.normalize:
tensor = F.normalize(tensor, dim=-1)
if self.transpose:
tensor = tensor.t()
return x @ tensor
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.zeros(1))
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.g
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-5):
super().__init__()
self.g = nn.Parameter(torch.ones(1))
self.eps = eps
def forward(self, x):
n = torch.norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
return x / n * self.g
class PreNorm(nn.Module):
def __init__(self, norm_class, dim, fn):
super().__init__()
self.norm = norm_class(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim = -1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x, **kwargs):
if self.chunks == 1:
return self.fn(x, **kwargs)
chunks = x.chunk(self.chunks, dim = self.dim)
return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)
# LSH attention as described in https://openreview.net/pdf?id=rkgNKkHtvB
# adapted from trax, stripped to what paper said needed to work
# namely that buckets need to be at least 64 with 8 rounds of hashing
# https://github.com/google/trax/blob/master/trax/layers/research/efficient_attention.py#L442
class LSHAttention(nn.Module):
def __init__( self,
dropout = 0.,
bucket_size = 64,
n_hashes = 8,
causal = False,
allow_duplicate_attention = True,
attend_across_buckets = True,
rehash_each_round = True,
drop_for_hash_rate = 0.0,
random_rotations_per_head = False,
return_attn = False):
super().__init__()
if dropout >= 1.0:
raise ValueError('Dropout rates must be lower than 1.')
self.dropout = nn.Dropout(dropout)
self.dropout_for_hash = nn.Dropout(drop_for_hash_rate)
assert rehash_each_round or allow_duplicate_attention, (
'The setting {allow_duplicate_attention=False, rehash_each_round=False}'
' is not implemented.')
self.causal = causal
self.bucket_size = bucket_size
self.n_hashes = n_hashes
self._allow_duplicate_attention = allow_duplicate_attention
self._attend_across_buckets = attend_across_buckets
self._rehash_each_round = rehash_each_round
self._random_rotations_per_head = random_rotations_per_head
# will expend extra computation to return attention matrix
self._return_attn = return_attn
# cache buckets for reversible network, reported by authors to make Reformer work at depth
self._cache = {}
@cache_method_decorator('_cache', 'buckets', reexecute=True)
def hash_vectors(self, n_buckets, vecs):
batch_size = vecs.shape[0]
device = vecs.device
# See https://arxiv.org/pdf/1509.02897.pdf
# We sample a different random rotation for each round of hashing to
# decrease the probability of hash misses.
assert n_buckets % 2 == 0
rot_size = n_buckets
rotations_shape = (
batch_size if self._random_rotations_per_head else 1,
vecs.shape[-1],
self.n_hashes if self._rehash_each_round else 1,
rot_size // 2)
random_rotations = torch.randn(rotations_shape, dtype=vecs.dtype, device=device).expand(batch_size, -1, -1, -1)
dropped_vecs = self.dropout_for_hash(vecs)
rotated_vecs = torch.einsum('btf,bfhi->bhti', dropped_vecs, random_rotations)
if self._rehash_each_round:
# rotated_vectors size [batch,n_hash,seq_len,buckets]
rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
buckets = torch.argmax(rotated_vecs, dim=-1)
else:
rotated_vecs = torch.cat([rotated_vecs, -rotated_vecs], dim=-1)
# In this configuration, we map each item to the top self.n_hashes buckets
rotated_vecs = torch.squeeze(rotated_vecs, 1)
bucket_range = torch.arange(rotated_vecs.shape[-1], device=device)
bucket_range = torch.reshape(bucket_range, (1, -1))
bucket_range = bucket_range.expand_as(rotated_vecs)
_, buckets = sort_key_val(rotated_vecs, bucket_range, dim=-1)
# buckets size [batch size, seq_len, buckets]
buckets = buckets[... , -self.n_hashes:].transpose(1, 2)
# buckets is now (self.n_hashes, seq_len). Next we add offsets so that
# bucket numbers from different hashing rounds don't overlap.
offsets = torch.arange(self.n_hashes, device=device)
offsets = torch.reshape(offsets * n_buckets, (1, -1, 1))
buckets = torch.reshape(buckets + offsets, (batch_size, -1,))
return buckets
def forward(self, qk, v, query_len = None, input_mask = None, input_attn_mask = None, pos_emb = None, **kwargs):
batch_size, seqlen, dim, device = *qk.shape, qk.device
query_len = default(query_len, seqlen)
is_reverse = kwargs.pop('_reverse', False)
depth = kwargs.pop('_depth', None)
assert seqlen % (self.bucket_size * 2) == 0, f'Sequence length ({seqlen}) needs to be divisible by target bucket size x 2 - {self.bucket_size * 2}'
n_buckets = seqlen // self.bucket_size
buckets = self.hash_vectors(n_buckets, qk, key_namespace=depth, fetch=is_reverse, set_cache=self.training)
# We use the same vector as both a query and a key.
assert int(buckets.shape[1]) == self.n_hashes * seqlen
total_hashes = self.n_hashes
ticker = torch.arange(total_hashes * seqlen, device=device).unsqueeze(0).expand_as(buckets)
buckets_and_t = seqlen * buckets + (ticker % seqlen)
buckets_and_t = buckets_and_t.detach()
# Hash-based sort ("s" at the start of variable names means "sorted")
sbuckets_and_t, sticker = sort_key_val(buckets_and_t, ticker, dim=-1)
_, undo_sort = sticker.sort(dim=-1)
del ticker
sbuckets_and_t = sbuckets_and_t.detach()
sticker = sticker.detach()
undo_sort = undo_sort.detach()
if exists(pos_emb):
qk = apply_rotary_pos_emb(qk, pos_emb)
st = (sticker % seqlen)
sqk = batched_index_select(qk, st)
sv = batched_index_select(v, st)
# Split off a "bin" axis so that attention only occurs within chunks.
chunk_size = total_hashes * n_buckets
bq_t = bkv_t = torch.reshape(st, (batch_size, chunk_size, -1))
bqk = torch.reshape(sqk, (batch_size, chunk_size, -1, dim))
bv = torch.reshape(sv, (batch_size, chunk_size, -1, dim))
# Hashing operates on unit-length vectors. Unnormalized query vectors are
# fine because they effectively provide a learnable temperature for the
# attention softmax, but normalizing keys is needed so that similarity for
# the purposes of attention correctly corresponds to hash locality.
bq = bqk
bk = F.normalize(bqk, p=2, dim=-1).type_as(bq)
# Allow each chunk to attend within itself, and also one chunk back. Chunk
# boundaries might occur in the middle of a sequence of items from the
# same bucket, so this increases the chances of attending to relevant items.
def look_one_back(x):
x_extra = torch.cat([x[:, -1:, ...], x[:, :-1, ...]], dim=1)
return torch.cat([x, x_extra], dim=2)
bk = look_one_back(bk)
bv = look_one_back(bv)
bkv_t = look_one_back(bkv_t)
# Dot-product attention.
dots = torch.einsum('bhie,bhje->bhij', bq, bk) * (dim ** -0.5)
masked_value = max_neg_value(dots)
# Mask for post qk attention logits of the input sequence
if input_attn_mask is not None:
input_attn_mask = F.pad(input_attn_mask, (0, seqlen - input_attn_mask.shape[-1], 0, seqlen - input_attn_mask.shape[-2]), value=True)
dot_attn_indices = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :])
input_attn_mask = input_attn_mask.reshape(batch_size, -1)
dot_attn_indices = dot_attn_indices.reshape(batch_size, -1)
mask = input_attn_mask.gather(1, dot_attn_indices).reshape_as(dots)
dots.masked_fill_(~mask, masked_value)
del mask
# Input mask for padding in variable lengthed sequences
if input_mask is not None:
input_mask = F.pad(input_mask, (0, seqlen - input_mask.shape[1]), value=True)
mq = input_mask.gather(1, st).reshape((batch_size, chunk_size, -1))
mkv = look_one_back(mq)
mask = mq[:, :, :, None] * mkv[:, :, None, :]
dots.masked_fill_(~mask, masked_value)
del mask
# Causal masking
if self.causal:
mask = bq_t[:, :, :, None] < bkv_t[:, :, None, :]
if seqlen > query_len:
mask = mask & (bkv_t[:, :, None, :] < query_len)
dots.masked_fill_(mask, masked_value)
del mask
# Mask out attention to self except when no other targets are available.
self_mask = bq_t[:, :, :, None] == bkv_t[:, :, None, :]
dots.masked_fill_(self_mask, TOKEN_SELF_ATTN_VALUE)
del self_mask
# Mask out attention to other hash buckets.
if not self._attend_across_buckets:
bq_buckets = bkv_buckets = torch.reshape(sbuckets_and_t // seqlen, (batch_size, chunk_size, -1))
bkv_buckets = look_one_back(bkv_buckets)
bucket_mask = bq_buckets[:, :, :, None] != bkv_buckets[:, :, None, :]
dots.masked_fill_(bucket_mask, masked_value)
del bucket_mask
# Don't double-count query-key pairs across multiple rounds of hashing.
# There are two possible strategies here. (1) The default is to count how
# many times a query-key pair is repeated, and to lower its log-prob
# correspondingly at each repetition. (2) When hard_k is set, the code
        # instead masks all but the first occurrence of each query-key pair.
if not self._allow_duplicate_attention:
locs1 = undo_sort // bq_t.shape[-1]
locs2 = (locs1 + 1) % chunk_size
if not self._attend_across_buckets:
locs1 = buckets * chunk_size + locs1
locs2 = buckets * chunk_size + locs2
locs = torch.cat([
torch.reshape(locs1, (batch_size, total_hashes, seqlen)),
torch.reshape(locs2, (batch_size, total_hashes, seqlen)),
], 1).permute((0, 2, 1))
slocs = batched_index_select(locs, st)
b_locs = torch.reshape(slocs, (batch_size, chunk_size, -1, 2 * total_hashes))
b_locs1 = b_locs[:, :, :, None, :total_hashes]
bq_locs = b_locs1.expand(b_locs.shape[:3] + (2, total_hashes))
bq_locs = torch.reshape(bq_locs, b_locs.shape)
bkv_locs = look_one_back(b_locs)
dup_counts = (bq_locs[:, :, :, None, :] == bkv_locs[:, :, None, :, :])
# for memory considerations, chunk summation of last dimension for counting duplicates
dup_counts = chunked_sum(dup_counts, chunks=(total_hashes * batch_size))
dup_counts = dup_counts.detach()
assert dup_counts.shape == dots.shape
dots = dots - torch.log(dup_counts + 1e-9)
del dup_counts
# Softmax.
dots_logsumexp = torch.logsumexp(dots, dim=-1, keepdim=True)
dots = torch.exp(dots - dots_logsumexp).type_as(dots)
dropped_dots = self.dropout(dots)
bo = torch.einsum('buij,buje->buie', dropped_dots, bv)
so = torch.reshape(bo, (batch_size, -1, dim))
slogits = torch.reshape(dots_logsumexp, (batch_size, -1,))
# unsort logits
o = batched_index_select(so, undo_sort)
logits = slogits.gather(1, undo_sort)
o = torch.reshape(o, (batch_size, total_hashes, seqlen, dim))
logits = torch.reshape(logits, (batch_size, total_hashes, seqlen, 1))
if query_len != seqlen:
query_slice = (slice(None), slice(None), slice(0, query_len))
o, logits = o[query_slice], logits[query_slice]
probs = torch.exp(logits - torch.logsumexp(logits, dim=1, keepdim=True))
out = torch.sum(o * probs, dim=1)
attn = torch.empty(0, device=device)
# return unsorted attention weights
if self._return_attn:
attn_unsort = ((bq_t * seqlen)[:, :, :, None] + bkv_t[:, :, None, :])
attn_unsort = attn_unsort.view(batch_size * total_hashes, -1).long()
unsorted_dots = torch.zeros(batch_size * total_hashes, seqlen * seqlen, device=device)
unsorted_dots.scatter_add_(1, attn_unsort, dots.view_as(attn_unsort))
del attn_unsort
unsorted_dots = unsorted_dots.reshape(batch_size, total_hashes, seqlen, seqlen)
attn = torch.sum(unsorted_dots[:, :, 0:query_len, :] * probs, dim=1)
# return output, attention matrix, and bucket distribution
return out, attn, buckets
# simple full attention
class FullQKAttention(nn.Module):
def __init__(self, causal = False, dropout = 0.):
super().__init__()
self.causal = causal
self.dropout = nn.Dropout(dropout)
def forward(self, qk, v, query_len = None, input_mask = None, input_attn_mask = None, **kwargs):
b, seq_len, dim = qk.shape
query_len = default(query_len, seq_len)
t = query_len
q = qk[:, 0:query_len]
qk = F.normalize(qk, 2, dim=-1).type_as(q)
dot = torch.einsum('bie,bje->bij', q, qk) * (dim ** -0.5)
# qk attention requires tokens not attend to self
i = torch.arange(t)
dot[:, i, i] = TOKEN_SELF_ATTN_VALUE
masked_value = max_neg_value(dot)
# Input mask for padding in variable lengthed sequences
if input_mask is not None:
mask = input_mask[:, 0:query_len, None] * input_mask[:, None, :]
mask = F.pad(mask, (0, seq_len - mask.shape[-1]), value=True)
dot.masked_fill_(~mask, masked_value)
# Mask for post qk attention logits of the input sequence
if input_attn_mask is not None:
input_attn_mask = F.pad(input_attn_mask, (0, seq_len - input_attn_mask.shape[-1]), value=True)
dot.masked_fill_(~input_attn_mask, masked_value)
if self.causal:
i, j = torch.triu_indices(t, t, 1)
dot[:, i, j] = masked_value
dot = dot.softmax(dim=-1)
dot = self.dropout(dot)
out = torch.einsum('bij,bje->bie', dot, v)
return out, dot, torch.empty(0)
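# --- Illustrative sketch (added; not part of the original file, never called). FullQKAttention
# is the dense fallback used for short sequences; note that queries and keys come from the
# same shared-qk tensor. Shapes below are arbitrary toy sizes.
def _full_qk_attention_demo():
    attn = FullQKAttention(causal = True)
    qk = torch.randn(1, 32, 64) # shared query / key projections
    v = torch.randn(1, 32, 64)
    out, dots, _ = attn(qk, v)
    assert out.shape == (1, 32, 64) and dots.shape == (1, 32, 32)
    return out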
# Shared qk attention, using either full or LSH attention
class LSHSelfAttention(nn.Module):
def __init__(self, dim, heads = 8, bucket_size = 64, n_hashes = 8, causal = False, dim_head = None, attn_chunks = 1, random_rotations_per_head = False, attend_across_buckets = True, allow_duplicate_attention = True, num_mem_kv = 0, one_value_head = False, use_full_attn = False, full_attn_thres = None, return_attn = False, post_attn_dropout = 0., dropout = 0., n_local_attn_heads = 0, **kwargs):
super().__init__()
assert dim_head or (dim % heads) == 0, 'dimensions must be divisible by number of heads'
assert n_local_attn_heads < heads, 'local attention heads must be less than number of heads'
dim_head = default(dim_head, dim // heads)
dim_heads = dim_head * heads
self.dim = dim
self.heads = heads
self.dim_head = dim_head
self.attn_chunks = default(attn_chunks, 1)
self.v_head_repeats = (heads if one_value_head else 1)
v_dim = dim_heads // self.v_head_repeats
self.toqk = nn.Linear(dim, dim_heads, bias = False)
self.tov = nn.Linear(dim, v_dim, bias = False)
self.to_out = nn.Linear(dim_heads, dim)
self.bucket_size = bucket_size
self.lsh_attn = LSHAttention(bucket_size=bucket_size, n_hashes=n_hashes, causal=causal, random_rotations_per_head=random_rotations_per_head, attend_across_buckets = attend_across_buckets, allow_duplicate_attention = allow_duplicate_attention, return_attn = return_attn, dropout = dropout, **kwargs)
self.full_attn = FullQKAttention(causal=causal, dropout=dropout)
self.post_attn_dropout = nn.Dropout(post_attn_dropout)
self.use_full_attn = use_full_attn
self.full_attn_thres = default(full_attn_thres, bucket_size)
self.num_mem_kv = num_mem_kv
self.mem_kv = nn.Parameter(torch.randn(1, num_mem_kv, dim, requires_grad=True)) if num_mem_kv > 0 else None
self.n_local_attn_heads = n_local_attn_heads
self.local_attn = LocalAttention(window_size=bucket_size * 2, causal=causal, dropout=dropout, shared_qk=True, look_forward=(1 if not causal else 0))
self.callback = None
def forward(self, x, keys = None, input_mask = None, input_attn_mask = None, context_mask = None, pos_emb = None, **kwargs):
device, dtype = x.device, x.dtype
b, t, e, h, dh, m, l_h = *x.shape, self.heads, self.dim_head, self.num_mem_kv, self.n_local_attn_heads
mem_kv = default(self.mem_kv, torch.empty(b, 0, e, dtype=dtype, device=device))
mem = mem_kv.expand(b, m, -1)
keys = default(keys, torch.empty(b, 0, e, dtype=dtype, device=device))
c = keys.shape[1]
kv_len = t + m + c
use_full_attn = self.use_full_attn or kv_len <= self.full_attn_thres
x = torch.cat((x, mem, keys), dim=1)
qk = self.toqk(x)
v = self.tov(x)
v = v.repeat(1, 1, self.v_head_repeats)
def merge_heads(v):
return v.view(b, kv_len, h, -1).transpose(1, 2)
def split_heads(v):
return v.view(b, h, t, -1).transpose(1, 2).contiguous()
merge_batch_and_heads = partial(merge_dims, 0, 1)
qk, v = map(merge_heads, (qk, v))
has_local = l_h > 0
lsh_h = h - l_h
split_index_fn = partial(split_at_index, 1, l_h)
(lqk, qk), (lv, v) = map(split_index_fn, (qk, v))
lqk, qk, lv, v = map(merge_batch_and_heads, (lqk, qk, lv, v))
masks = {}
if input_mask is not None or context_mask is not None:
default_mask = torch.tensor([True], device=device)
i_mask = default(input_mask, default_mask.expand(b, t))
m_mask = default_mask.expand(b, m)
c_mask = default(context_mask, default_mask.expand(b, c))
mask = torch.cat((i_mask, m_mask, c_mask), dim=1)
mask = merge_batch_and_heads(expand_dim(1, lsh_h, mask))
masks['input_mask'] = mask
if input_attn_mask is not None:
input_attn_mask = merge_batch_and_heads(expand_dim(1, lsh_h, input_attn_mask))
masks['input_attn_mask'] = input_attn_mask
attn_fn = self.lsh_attn if not use_full_attn else self.full_attn
partial_attn_fn = partial(attn_fn, query_len = t, pos_emb = pos_emb, **kwargs)
attn_fn_in_chunks = process_inputs_chunk(partial_attn_fn, chunks = self.attn_chunks)
out, attn, buckets = attn_fn_in_chunks(qk, v, **masks)
if self.callback is not None:
self.callback(attn.reshape(b, lsh_h, t, -1), buckets.reshape(b, lsh_h, -1))
if has_local:
lqk, lv = lqk[:, :t], lv[:, :t]
local_out = self.local_attn(lqk, lqk, lv, input_mask=input_mask)
local_out = local_out.reshape(b, l_h, t, -1)
out = out.reshape(b, lsh_h, t, -1)
out = torch.cat((local_out, out), dim=1)
out = split_heads(out).view(b, t, -1)
out = self.to_out(out)
return self.post_attn_dropout(out)
# feed forward
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
# positional embeddings
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
def forward(self, x):
t = torch.arange(x.shape[1], device=x.device)
return self.emb(t)
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, x, seq_dim = 1):
t = torch.arange(x.shape[seq_dim], device = x.device).type_as(self.inv_freq)
sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb[None, :, :].type_as(x)
# rotary positional embedding helpers
def rotate_every_two(x):
x = rearrange(x, '... (d j) -> ... d j', j = 2)
x1, x2 = x.unbind(dim = -1)
x = torch.stack((-x2, x1), dim = -1)
return rearrange(x, '... d j -> ... (d j)')
def apply_rotary_pos_emb(qk, sinu_pos):
sinu_pos = sinu_pos.type(qk.dtype)
sinu_pos = rearrange(sinu_pos, '() n (j d) -> n j d', j = 2)
sin, cos = sinu_pos.unbind(dim = -2)
sin, cos = map(lambda t: repeat(t, 'n d -> n (d j)', j = 2), (sin, cos))
seq_len = sin.shape[0]
qk, qk_pass = qk[:, :seq_len], qk[:, seq_len:]
qk = (qk * cos) + (rotate_every_two(qk) * sin)
return torch.cat((qk, qk_pass), dim = 1)
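# --- Illustrative sketch (added; not part of the original file, never called). The rotary
# embedding is applied to the shared qk projections inside LSH attention; it only rotates
# pairs of channels, so the tensor shape is preserved.
def _rotary_demo():
    pos_emb = FixedPositionalEmbedding(64)
    qk = torch.randn(2, 16, 64) # (batch, seq, dim_head)
    sinu = pos_emb(qk) # (1, 16, 64), sin half then cos half
    rotated = apply_rotary_pos_emb(qk, sinu)
    assert rotated.shape == qk.shape
    return rotated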
# reformer lm
class Reformer(nn.Module):
def __init__(self, dim, depth, heads = 8, dim_head = None, bucket_size = 64, n_hashes = 8, ff_chunks = 100, attn_chunks = None, causal = False, weight_tie = False, lsh_dropout = 0., ff_dropout = 0., ff_activation = None, ff_mult = 4, ff_glu = False, post_attn_dropout = 0., layer_dropout = 0., lsh_attend_across_buckets = True, lsh_allow_duplicate_attention = True, random_rotations_per_head = False, use_scale_norm = False, use_rezero = False, use_full_attn = False, full_attn_thres = 0, reverse_thres = 0, num_mem_kv = 0, one_value_head = False, n_local_attn_heads = 0, pkm_layers = tuple(), pkm_num_keys = 128):
super().__init__()
self.dim = dim
self.depth = depth
self.bucket_size = bucket_size
self.num_mem_kv = num_mem_kv
self.full_attn_thres = full_attn_thres
get_attn = lambda: LSHSelfAttention(dim, heads, bucket_size, n_hashes, causal = causal, dim_head = dim_head, dropout = lsh_dropout, post_attn_dropout = post_attn_dropout, attn_chunks = attn_chunks, allow_duplicate_attention = lsh_allow_duplicate_attention, attend_across_buckets = lsh_attend_across_buckets, random_rotations_per_head = random_rotations_per_head, num_mem_kv = num_mem_kv, use_full_attn = use_full_attn, full_attn_thres = full_attn_thres, one_value_head = one_value_head, n_local_attn_heads = n_local_attn_heads)
get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, activation = ff_activation, mult = ff_mult, glu = ff_glu), along_dim = -2)
get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys)
if weight_tie:
get_attn, get_ff, get_pkm = map(cache_fn, (get_attn, get_ff, get_pkm))
blocks = []
norm_type = ScaleNorm if use_scale_norm else nn.LayerNorm
residual_fn_wrapper = ReZero if use_rezero else partial(PreNorm, norm_type, dim)
for ind in range(depth):
layer_num = ind + 1
use_pkm = layer_num in cast_tuple(pkm_layers)
parallel_net = None
attn = get_attn()
if use_pkm:
parallel_net = get_pkm()
else:
parallel_net = get_ff()
f = residual_fn_wrapper(attn)
g = residual_fn_wrapper(parallel_net)
blocks.append(nn.ModuleList([f, g]))
self.layers = ReversibleSequence(nn.ModuleList(blocks), layer_dropout = layer_dropout, reverse_thres = reverse_thres, send_signal = True)
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim = -1)
x = self.layers(x, **kwargs)
return torch.stack(x.chunk(2, dim=-1)).mean(dim=0)
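# --- Illustrative sketch (added; not part of the original file, never called). A minimal
# end-to-end pass through the reversible block stack, assuming the pinned dependencies
# (local-attention, axial-positional-embedding, product-key-memory) are installed. The
# hyperparameters are deliberately tiny; the sequence length must be a multiple of
# bucket_size * 2 for LSH attention.
def _reformer_demo():
    model = Reformer(dim = 64, depth = 1, heads = 4, bucket_size = 16, n_hashes = 2, causal = True)
    x = torch.randn(1, 128, 64) # 128 is divisible by bucket_size * 2 = 32
    with torch.no_grad():
        out = model.eval()(x)
    assert out.shape == (1, 128, 64)
    return out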
class ReformerLM(nn.Module):
def __init__(self, num_tokens, dim, depth, max_seq_len, heads = 8, dim_head = 64, bucket_size = 64, n_hashes = 4, ff_chunks = 100, attn_chunks = 1, causal = False, weight_tie = False, lsh_dropout = 0., ff_dropout = 0., ff_mult = 4, ff_activation = None, ff_glu = False, post_attn_dropout = 0., layer_dropout = 0., random_rotations_per_head = False, use_scale_norm = False, use_rezero = False, use_full_attn = False, full_attn_thres = 0, reverse_thres = 0, num_mem_kv = 0, one_value_head = False, emb_dim = None, return_embeddings = False, weight_tie_embedding = False, fixed_position_emb = False, absolute_position_emb = False, axial_position_emb = False, axial_position_shape = None, n_local_attn_heads = 0, pkm_layers = tuple(), pkm_num_keys = 128):
super().__init__()
emb_dim = default(emb_dim, dim)
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, emb_dim)
self.to_model_dim = Identity() if emb_dim == dim else nn.Linear(emb_dim, dim)
self.pos_emb = Always(0)
self.layer_pos_emb = Always(None)
if axial_position_emb:
axial_position_shape = default(axial_position_shape, (math.ceil(max_seq_len / bucket_size), bucket_size))
self.pos_emb = AxialPositionalEmbedding(emb_dim, axial_position_shape)
elif absolute_position_emb:
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len)
elif fixed_position_emb:
self.pos_emb = FixedPositionalEmbedding(emb_dim)
else:
self.layer_pos_emb = FixedPositionalEmbedding(dim_head)
self.reformer = Reformer(dim, depth, heads = heads, dim_head = dim_head, bucket_size = bucket_size, n_hashes = n_hashes, ff_chunks = ff_chunks, attn_chunks = attn_chunks, causal = causal, weight_tie = weight_tie, lsh_dropout = lsh_dropout, ff_mult = ff_mult, ff_activation = ff_activation, ff_glu = ff_glu, ff_dropout = ff_dropout, post_attn_dropout = 0., layer_dropout = layer_dropout, random_rotations_per_head = random_rotations_per_head, use_scale_norm = use_scale_norm, use_rezero = use_rezero, use_full_attn = use_full_attn, full_attn_thres = full_attn_thres, reverse_thres = reverse_thres, num_mem_kv = num_mem_kv, one_value_head = one_value_head, n_local_attn_heads = n_local_attn_heads, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys)
self.norm = nn.LayerNorm(dim)
if return_embeddings:
self.out = Identity()
return
self.out = nn.Sequential(
nn.Linear(dim, emb_dim) if emb_dim != dim else Identity(),
nn.Linear(emb_dim, num_tokens) if not weight_tie_embedding else MatrixMultiply(self.token_emb.weight, transpose=True, normalize=True)
)
def forward(self, x, **kwargs):
x = self.token_emb(x)
x = x + self.pos_emb(x)
layer_pos_emb = self.layer_pos_emb(x)
x = self.to_model_dim(x)
x = self.reformer(x, pos_emb = layer_pos_emb, **kwargs)
x = self.norm(x)
return self.out(x)
| reformer-pytorch-master | reformer_pytorch/reformer_pytorch.py |
import deepspeed
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import argparse
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
def add_argument():
parser=argparse.ArgumentParser(description='enwik8')
parser.add_argument('--with_cuda', default=False, action='store_true',
help='use CPU in case there\'s no GPU support')
parser.add_argument('--use_ema', default=False, action='store_true',
help='whether use exponential moving average')
parser.add_argument('-b', '--batch_size', default=32, type=int,
help='mini-batch size (default: 32)')
parser.add_argument('-e', '--epochs', default=30, type=int,
help='number of total epochs (default: 30)')
parser.add_argument('--local_rank', type=int, default=-1,
help='local rank passed from distributed launcher')
parser = deepspeed.add_config_arguments(parser)
args=parser.parse_args()
return args
# constants
EPOCHS = 20
GRADIENT_ACCUMULATE_EVERY = 4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 1024
SEQ_LEN = 4096
# helpers
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = ReformerLM(
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
num_tokens = 256,
heads = 8,
bucket_size = 64,
n_hashes = 4,
ff_chunks = 10,
lsh_dropout = 0.1,
weight_tie = True,
causal = True,
n_local_attn_heads = 4,
use_full_attn = False # set this to true for comparison with full attention
)
model = TrainingWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.array(np.frombuffer(file.read(int(95e6)), dtype=np.uint8))
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
# setup deepspeed
cmd_args = add_argument()
model_engine, optimizer, trainloader, _ = deepspeed.initialize(args=cmd_args, model=model, model_parameters=model.parameters(), training_data=train_dataset)
# training
for _ in range(EPOCHS):
for i, data in enumerate(trainloader):
model_engine.train()
data = data.to(model_engine.local_rank)
loss = model_engine(data, return_loss = True)
model_engine.backward(loss)
model_engine.step()
print(loss.item() * GRADIENT_ACCUMULATE_EVERY)
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
inp = random.choice(val_dataset)[:-1]
loss = model(inp[None, :].cuda(), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
            print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp.cuda(), GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| reformer-pytorch-master | examples/enwik8_deepspeed/train.py |
from reformer_pytorch import ReformerLM
from reformer_pytorch.generative_tools import TrainingWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 4096
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate model
model = ReformerLM(
dim = 512,
depth = 6,
max_seq_len = SEQ_LEN,
num_tokens = 256,
heads = 8,
bucket_size = 64,
n_hashes = 4,
ff_chunks = 10,
lsh_dropout = 0.1,
weight_tie = True,
causal = True,
n_local_attn_heads = 4,
use_full_attn = False # set this to true for comparison with full attention
)
model = TrainingWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.array(np.frombuffer(file.read(int(95e6)), dtype=np.uint8))
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| reformer-pytorch-master | examples/enwik8_simple/train.py |
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, random_split
from tqdm import tqdm
from reformer_pytorch import Reformer, ReformerLM
from transformers import BertTokenizer, PreTrainedTokenizer
from fairseq.optim.adafactor import Adafactor
import os
import json
import logging
from datetime import datetime
class WikiDataset(Dataset):
def __init__(self, path="", prefix="train"):
assert os.path.isdir(path)
self.documents = []
filename_list = os.listdir(path)
for file in filename_list:
path_to_file = os.path.join(path, file)
if not os.path.isfile(path_to_file):
continue
self.documents.append(path_to_file)
def __len__(self):
""" Returns the number of documents. """
return len(self.documents)
def __getitem__(self, idx):
document_path = self.documents[idx]
document_name = document_path.split("/")[-1]
items = []
with open(document_path, encoding="utf-8") as source:
raw_text = source.readlines()
for obj in raw_text:
text = json.loads(obj)['text']
text = re.sub('\\n', ' ', text)
text = re.sub('\\s+', ' ', text)
items.append(text)
return items
class ReformerTrainer(object):
def __init__(self,
dataset,
model,
tokenizer,
device=None,
train_batch_size=8,
eval_batch_size=None,
tb_writer=True,
tb_dir='./tb_logs',
log_dir='./logs'):
"""
        Provides an easy-to-use class for pretraining and evaluating a Reformer Model.
        :param dataset: (torch.utils.data.Dataset) containing all of the data you wish to utilize during training.
        :param model: (reformer_pytorch.Reformer)
        :param tokenizer: (transformers.PreTrainedTokenizer) defaults to BertTokenizer ('bert-base-cased')
:param device: provide manual device placement. If None, will default to cuda:0 if available.
:param tb_writer: (bool) Whether to write to tensorboard or not.
:param tb_dir: (str) Where to write TB logs to.
:param log_dir: (str) Where to write generic logs to.
"""
self.dataset = dataset
self.model = model
self.tokenizer = tokenizer
self.device = device
self.n_gpu = torch.cuda.device_count() if torch.cuda.is_available() else 0
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.tb_writer = tb_writer
self.log_dir = log_dir
if tokenizer is None:
self.tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
if device is None:
self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
if eval_batch_size is None:
self.eval_batch_size = train_batch_size
if tb_writer:
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter(log_dir=tb_dir)
logging.basicConfig(filename=f'{log_dir}/{datetime.now().date()}.log', level=logging.INFO)
def build_dataloaders(self, train_test_split=0.1, train_shuffle=True, eval_shuffle=True):
"""
Builds the Training and Eval DataLoaders
:param train_test_split: The ratio split of test to train data.
:param train_shuffle: (bool) True if you wish to shuffle the train_dataset.
:param eval_shuffle: (bool) True if you wish to shuffle the eval_dataset.
:return: train dataloader and evaluation dataloader.
"""
dataset_len = len(self.dataset)
eval_len = int(dataset_len * train_test_split)
train_len = dataset_len - eval_len
train_dataset, eval_dataset = random_split(self.dataset, (train_len, eval_len))
train_loader = DataLoader(train_dataset, batch_size=self.train_batch_size, shuffle=train_shuffle)
eval_loader = DataLoader(eval_dataset, batch_size=self.eval_batch_size, shuffle=eval_shuffle)
logging.info(f'''train_dataloader size: {len(train_loader.dataset)} | shuffle: {train_shuffle}
eval_dataloader size: {len(eval_loader.dataset)} | shuffle: {eval_shuffle}''')
return train_loader, eval_loader
def mask_tokens(self, inputs: torch.Tensor, mlm_probability=0.15, pad=True):
""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
labels = inputs.clone()
# mlm_probability defaults to 0.15 in Bert
probability_matrix = torch.full(labels.shape, mlm_probability)
special_tokens_mask = [
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if self.tokenizer._pad_token is not None:
padding_mask = labels.eq(self.tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
inputs[indices_random] = random_words[indices_random]
if pad:
input_pads = self.tokenizer.max_len - inputs.shape[-1]
label_pads = self.tokenizer.max_len - labels.shape[-1]
inputs = F.pad(inputs, pad=(0, input_pads), value=self.tokenizer.pad_token_id)
            labels = F.pad(labels, pad=(0, label_pads), value=-100)  # pad labels with the ignore index so padded positions do not contribute to the loss
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return inputs, labels
def _tokenize_input_ids(self, input_ids: list, pad_to_max_length: bool = True):
"""
Helper function to clean up the train and eval functions
:param input_ids: inputs to tokenize.
:param pad_to_max_length: Whether you want to pad the inputs to the tokenizer.max_len
:return: Tensor containing training data.
"""
inputs = torch.cat(
[
self.tokenizer.encode(
input_ids[i],
add_special_tokens=True,
max_length=self.tokenizer.max_len,
pad_to_max_length=pad_to_max_length,
return_tensors='pt'
) \
for i in range(len(input_ids))
]
)
return inputs
def train(self,
epochs,
train_dataloader,
eval_dataloader,
log_steps,
ckpt_steps,
ckpt_dir=None,
gradient_accumulation_steps=1):
"""
Trains the Reformer Model
:param epochs: The number of times you wish to loop through the dataset.
:param train_dataloader: (torch.utils.data.DataLoader) The data to train on.
:param eval_dataloader: (torch.utils.data.DataLoader) The data to evaluate on.
:param log_steps: The number of steps to iterate before logging.
:param ckpt_steps: The number of steps to iterate before checkpointing.
:param ckpt_dir: The directory to save the checkpoints to.
:param gradient_accumulation_steps: Optional gradient accumulation.
:return: Total number of steps, total loss, model
"""
optimizer = Adafactor(self.model.parameters())
loss_fn = nn.CrossEntropyLoss()
losses = {}
global_steps = 0
local_steps = 0
step_loss = 0.0
if ckpt_dir is not None:
assert os.path.isdir(ckpt_dir)
try:
logging.info(f'{datetime.now()} | Continuing from checkpoint...')
self.model.load_state_dict(torch.load(f'{ckpt_dir}/model_state_dict.pt', map_location=self.device))
optimizer.load_state_dict(torch.load(f'{ckpt_dir}/optimizer_state_dict.pt'))
except Exception as e:
logging.info(f'{datetime.now()} | No checkpoint was found | {e}')
self.model.train()
if self.n_gpu > 1:
self.model = nn.DataParallel(self.model)
logging.info(f'{datetime.now()} | Utilizing {self.n_gpu} GPUs')
self.model.to(self.device)
logging.info(f'{datetime.now()} | Moved model to: {self.device}')
logging.info(
f'{datetime.now()} | train_batch_size: {self.train_batch_size} | eval_batch_size: {self.eval_batch_size}')
logging.info(f'{datetime.now()} | Epochs: {epochs} | log_steps: {log_steps} | ckpt_steps: {ckpt_steps}')
logging.info(f'{datetime.now()} | gradient_accumulation_steps: {gradient_accumulation_steps}')
for epoch in tqdm(range(epochs), desc='Epochs', position=0):
logging.info(f'{datetime.now()} | Epoch: {epoch}')
for step, batch in tqdm(enumerate(train_dataloader),
desc='Epoch Iterator',
position=1,
leave=True,
total=len(train_dataloader)):
for data in batch:
inputs = self._tokenize_input_ids(data, pad_to_max_length=True)
inputs, labels = self.mask_tokens(inputs)
inputs, labels = inputs.to(self.device), labels.to(self.device)
output = self.model(inputs)
# only calculating loss on masked tokens
loss_mx = labels != -100
output = output[loss_mx].view(-1, self.tokenizer.vocab_size)
labels = labels[loss_mx].view(-1)
loss = loss_fn(output, labels)
if gradient_accumulation_steps > 1:
loss /= gradient_accumulation_steps
loss.backward()
step_loss += loss.item()
losses[global_steps] = loss.item()
local_steps += 1
global_steps += 1
if global_steps % gradient_accumulation_steps == 0:
optimizer.step()
self.model.zero_grad()
if global_steps % log_steps == 0:
if self.tb_writer:
self.writer.add_scalar('Train/Loss', step_loss / local_steps, global_steps)
self.writer.close()
logging.info(
f'''{datetime.now()} | Train Loss: {step_loss / local_steps} | Steps: {global_steps}''')
with open(f'{self.log_dir}/train_results.json', 'w') as results_file:
json.dump(losses, results_file)
results_file.close()
step_loss = 0.0
local_steps = 0
if global_steps % ckpt_steps == 0:
# evaluating before every checkpoint
self.evaluate(eval_dataloader)
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
torch.save(model_to_save.state_dict(), f'{ckpt_dir}/model_state_dict.pt')
torch.save(optimizer.state_dict(), f'{ckpt_dir}/optimizer_state_dict.pt')
logging.info(f'{datetime.now()} | Saved checkpoint to: {ckpt_dir}')
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
torch.save(model_to_save.state_dict(), f'{ckpt_dir}/model_state_dict.pt')
torch.save(optimizer.state_dict(), f'{ckpt_dir}/optimizer_state_dict.pt')
return self.model
def evaluate(self, dataloader):
"""
Runs through the provided dataloader with torch.no_grad()
:param dataloader: (torch.utils.data.DataLoader) Evaluation DataLoader
:return: None
"""
loss_fn = nn.CrossEntropyLoss()
if self.n_gpu > 1 and not isinstance(self.model, nn.DataParallel):
self.model = nn.DataParallel(self.model)
self.model.eval()
eval_loss = 0.0
perplexity = 0.0
eval_steps = 0
logging.info(f'{datetime.now()} | Evaluating...')
for step, batch in tqdm(enumerate(dataloader), desc='Evaluating', leave=True, total=len(dataloader)):
for data in batch:
inputs = self._tokenize_input_ids(data, pad_to_max_length=True)
inputs, labels = self.mask_tokens(inputs)
inputs, labels = inputs.to(self.device), labels.to(self.device)
with torch.no_grad():
output = self.model(inputs)
loss_mx = labels != -100
output_ids = output[loss_mx].view(-1, self.tokenizer.vocab_size)
labels = labels[loss_mx].view(-1)
tmp_eval_loss = loss_fn(output_ids, labels)
tmp_perplexity = torch.exp(tmp_eval_loss)
if self.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
perplexity += tmp_perplexity.item()
eval_steps += 1
eval_loss /= eval_steps
perplexity /= eval_steps
if self.tb_writer:
self.writer.add_scalar('Eval/Loss', eval_loss, eval_steps)
self.writer.close()
self.writer.add_scalar('Perplexity', perplexity, eval_steps)
self.writer.close()
logging.info(f'{datetime.now()} | Step: {step} | Eval Loss: {eval_loss} | Perplexity: {perplexity}')
return None
if __name__ == '__main__':
dataset = WikiDataset(path='D:/data/enwiki')
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
tokenizer.max_len = 128
model = ReformerLM(
num_tokens=tokenizer.vocab_size,
dim=512,
depth=6,
heads=8,
max_seq_len=tokenizer.max_len,
causal=True
)
trainer = ReformerTrainer(dataset, model, tokenizer, train_batch_size=32, eval_batch_size=32)
train_dataloader, eval_dataloader = trainer.build_dataloaders(train_test_split=0.90)
model = trainer.train(epochs=3,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
log_steps=10,
ckpt_steps=100,
ckpt_dir='./ckpts',
gradient_accumulation_steps=1)
torch.save(model, './ckpts/model.bin')
| reformer-pytorch-master | pretraining/self-supervised.py |
from setuptools import setup, find_packages
setup(
name = 'tranception-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.8',
license='MIT',
description = 'Tranception - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/tranception-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'protein fitness'
],
install_requires=[
'einops>=0.4',
'einops-exts',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| tranception-pytorch-main | setup.py |
from tranception_pytorch.tranception_pytorch import Tranception
| tranception-pytorch-main | tranception_pytorch/__init__.py |
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange
from einops_exts import rearrange_many
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# relative positional bias
class LearnedAlibiPosBias(nn.Module):
def __init__(self, heads):
super().__init__()
self.heads = heads
slopes = torch.Tensor(self._get_slopes(heads))
slopes = rearrange(slopes, 'h -> h 1 1')
self.slopes = nn.Parameter(slopes)
self.register_buffer('bias', None, persistent = False)
def get_bias(self, i, j, device):
i_arange = torch.arange(i, device = device)
j_arange = torch.arange(j, device = device)
bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1'))
return bias
@staticmethod
def _get_slopes(heads):
def get_slopes_power_of_2(n):
start = (2**(-2**-(math.log2(n)-3)))
ratio = start
return [start*ratio**i for i in range(n)]
if math.log2(heads).is_integer():
return get_slopes_power_of_2(heads)
closest_power_of_2 = 2 ** math.floor(math.log2(heads))
return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2]
def forward(self, qk_sim):
h, i, j, device = *qk_sim.shape[-3:], qk_sim.device
if exists(self.bias) and self.bias.shape[-1] >= j:
return self.bias[..., :i, :j]
bias = self.get_bias(i, j, device)
bias = bias * self.slopes
num_heads_unalibied = h - bias.shape[0]
bias = F.pad(bias, (0, 0, 0, 0, 0, num_heads_unalibied))
self.register_buffer('bias', bias, persistent = False)
return bias
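# e.g. for 8 heads, _get_slopes yields the geometric sequence 1/2, 1/4, ..., 1/256;
# each head then adds slope * -|i - j| to its attention logits, so heads attenuate
# attention with distance at different (here learned) rates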
# helper classes
class ReluSquared(nn.Module):
""" found with neural architecture search in Primer paper """
def forward(self, x):
return F.relu(x) ** 2
def FeedForward(dim, mult = 4):
hidden_dim = int(dim * mult)
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, hidden_dim),
ReluSquared(),
nn.Linear(hidden_dim, dim)
)
class DepthwiseConv1d(nn.Module):
def __init__(self, dim, kernel_size, causal = True):
super().__init__()
assert (kernel_size % 2) == 1
self.padding = (kernel_size - 1, 0) if causal else (kernel_size // 2, kernel_size // 2)
self.conv = nn.Conv1d(dim, dim, kernel_size = kernel_size, groups = dim)
def forward(self, x):
x = F.pad(x, self.padding)
return self.conv(x)
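# with causal = True the input is left-padded by (kernel_size - 1), so output position i
# only depends on positions <= i; otherwise symmetric padding keeps the sequence length unchanged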
class Attention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
causal = False,
ds_conv_kernel_sizes = (0, 3, 5, 7) # heads were grouped into 4 groups and given a depthwise conv after the queries / keys / values projection
):
super().__init__()
self.groups = len(ds_conv_kernel_sizes)
assert heads >= self.groups and (heads % self.groups) == 0, f'heads must be greater than {self.groups} and divisible by {self.groups}'
self.scale = dim_head ** -0.5
self.causal = causal
self.heads = heads
self.heads_per_group = heads // self.groups
inner_dim = heads * dim_head
self.norm = nn.LayerNorm(dim)
self.to_qkv = nn.Conv1d(dim, inner_dim * 3, 1, bias = False)
# ds convs with different kernel sizes for 4 groups of heads
self.qkv_ds_convs = nn.ModuleList([])
for _ in range(3): # for queries, keys, values
ds_convs = nn.ModuleList([])
for kernel_size in ds_conv_kernel_sizes:
if kernel_size == 0:
ds_convs.append(nn.Identity())
continue
ds_convs.append(DepthwiseConv1d(dim_head * self.heads_per_group, kernel_size, causal = causal))
self.qkv_ds_convs.append(ds_convs)
# learned alibi positional bias for 4 groups of heads
self.learned_alibi_pos_biases = nn.ModuleList([LearnedAlibiPosBias(heads = self.heads_per_group) for _ in range(self.groups)])
# outward projection
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x):
device, heads_per_group = x.device, self.heads_per_group
x = self.norm(x)
x = rearrange(x, 'b n d -> b d n')
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = rearrange_many((q, k, v), 'b (h d) n -> b h d n', h = self.heads)
# apply causal depthwise conv to queries, keys, values (a la Primer) with different kernel sizes across 4 groups of heads
def apply_causal_ds_conv_to_grouped_heads(args):
projs, ds_convs = args
batch = projs.shape[0]
projs = rearrange_many(projs.split(heads_per_group, dim = 1), 'b h d n -> b (h d) n')
conv_out = [fn(t) for fn, t in zip(ds_convs, projs)]
conv_out = map(lambda t: rearrange(t, 'b (h d) n -> b h d n', h = heads_per_group), conv_out)
conv_out = torch.cat(tuple(conv_out), dim = 1)
return rearrange(conv_out, 'b h d n -> b h n d')
q, k, v = map(apply_causal_ds_conv_to_grouped_heads, zip((q, k, v), self.qkv_ds_convs))
# scale and similarity
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
# learned alibi pos bias across 4 groups of heads
# so heads specialize to looking at different distances of kmers
grouped_sims = sim.split(self.heads // self.groups, dim = 1)
grouped_sims = [(alibi(sim_group) + sim_group) for alibi, sim_group in zip(self.learned_alibi_pos_biases, grouped_sims)]
sim = torch.cat(grouped_sims, dim = 1)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention, but of course
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# classes
class Tranception(nn.Module):
def __init__(
self,
*,
dim,
depth,
num_tokens = 21,
heads = 8,
dim_head = 64,
ff_mult = 4,
ds_conv_kernel_sizes = (0, 3, 5, 7),
causal = True
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(dim = dim, heads = heads, dim_head = dim_head, ds_conv_kernel_sizes = ds_conv_kernel_sizes, causal = causal),
FeedForward(dim, mult = ff_mult)
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(
self,
x,
mask = None
):
x = self.token_emb(x)
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return self.to_logits(x)
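# minimal usage sketch (illustrative settings, not from the original repository):
# instantiate a small Tranception model and run a batch of integer-encoded
# protein sequences through it to get per-position token logits

if __name__ == '__main__':
    model = Tranception(
        dim = 128,
        depth = 2,
        num_tokens = 21,   # amino acid vocabulary size, as defaulted above
        heads = 8,
        dim_head = 32
    )

    seqs = torch.randint(0, 21, (2, 256))  # (batch, length) of token ids
    logits = model(seqs)                   # (2, 256, 21)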
| tranception-pytorch-main | tranception_pytorch/tranception_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'g-mlp-pytorch',
packages = find_packages(),
version = '0.1.5',
license='MIT',
description = 'gMLP - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/g-mlp-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
        'multi-layered-perceptrons'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| g-mlp-pytorch-main | setup.py |
from g_mlp_pytorch import gMLP
from g_mlp_pytorch.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 768
SEQ_LEN = 768
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = gMLP(
num_tokens = 256,
dim = 512,
seq_len = SEQ_LEN,
depth = 8,
causal = True
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    X = np.frombuffer(file.read(int(95e6)), dtype=np.uint8)
trX, vaX = np.split(X, [int(90e6)])
data_train, data_val = torch.from_numpy(trX), torch.from_numpy(vaX)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10., desc='training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
| g-mlp-pytorch-main | train.py |
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
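# e.g. with thres = 0.9 and a 256 token vocabulary, k = int(0.1 * 256) = 25, so only
# the 25 largest logits are kept and the rest are set to -inf before sampling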
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
logits = self.net(x, **kwargs)[:, -1, :]
            filtered_logits = filter_logits_fn(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi, xo = x[:, :-1], x[:, 1:]
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
| g-mlp-pytorch-main | g_mlp_pytorch/autoregressive_wrapper.py |
from g_mlp_pytorch.g_mlp_pytorch import gMLP, gMLPVision, gMLPBlock, SpatialGatingUnit
| g-mlp-pytorch-main | g_mlp_pytorch/__init__.py |
from random import randrange
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# functions
def exists(val):
return val is not None
def pair(val):
return (val, val) if not isinstance(val, tuple) else val
def dropout_layers(layers, prob_survival):
if prob_survival == 1:
return layers
num_layers = len(layers)
to_drop = torch.zeros(num_layers).uniform_(0., 1.) > prob_survival
# make sure at least one layer makes it
if all(to_drop):
rand_index = randrange(num_layers)
to_drop[rand_index] = False
layers = [layer for (layer, drop) in zip(layers, to_drop) if not drop]
return layers
def shift(t, amount, mask = None):
if amount == 0:
return t
return F.pad(t, (0, 0, amount, -amount), value = 0.)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class PreShiftTokens(nn.Module):
def __init__(self, shifts, fn):
super().__init__()
self.fn = fn
self.shifts = tuple(shifts)
def forward(self, x, **kwargs):
if self.shifts == (0,):
return self.fn(x, **kwargs)
shifts = self.shifts
segments = len(shifts)
feats_per_shift = x.shape[-1] // segments
splitted = x.split(feats_per_shift, dim = -1)
segments_to_shift, rest = splitted[:segments], splitted[segments:]
segments_to_shift = list(map(lambda args: shift(*args), zip(segments_to_shift, shifts)))
x = torch.cat((*segments_to_shift, *rest), dim = -1)
return self.fn(x, **kwargs)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Attention(nn.Module):
def __init__(self, dim_in, dim_out, dim_inner, causal = False):
super().__init__()
self.scale = dim_inner ** -0.5
self.causal = causal
self.to_qkv = nn.Linear(dim_in, dim_inner * 3, bias = False)
self.to_out = nn.Linear(dim_inner, dim_out)
def forward(self, x):
device = x.device
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
if self.causal:
mask = torch.ones(sim.shape[-2:], device = device).triu(1).bool()
sim.masked_fill_(mask[None, ...], -torch.finfo(q.dtype).max)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
return self.to_out(out)
class SpatialGatingUnit(nn.Module):
def __init__(
self,
dim,
dim_seq,
causal = False,
act = nn.Identity(),
heads = 1,
init_eps = 1e-3,
circulant_matrix = False
):
super().__init__()
dim_out = dim // 2
self.heads = heads
self.causal = causal
self.norm = nn.LayerNorm(dim_out)
self.act = act
# parameters
if circulant_matrix:
self.circulant_pos_x = nn.Parameter(torch.ones(heads, dim_seq))
self.circulant_pos_y = nn.Parameter(torch.ones(heads, dim_seq))
self.circulant_matrix = circulant_matrix
shape = (heads, dim_seq,) if circulant_matrix else (heads, dim_seq, dim_seq)
weight = torch.zeros(shape)
self.weight = nn.Parameter(weight)
init_eps /= dim_seq
nn.init.uniform_(self.weight, -init_eps, init_eps)
self.bias = nn.Parameter(torch.ones(heads, dim_seq))
def forward(self, x, gate_res = None):
device, n, h = x.device, x.shape[1], self.heads
res, gate = x.chunk(2, dim = -1)
gate = self.norm(gate)
weight, bias = self.weight, self.bias
if self.circulant_matrix:
# build the circulant matrix
dim_seq = weight.shape[-1]
weight = F.pad(weight, (0, dim_seq), value = 0)
weight = repeat(weight, '... n -> ... (r n)', r = dim_seq)
weight = weight[:, :-dim_seq].reshape(h, dim_seq, 2 * dim_seq - 1)
weight = weight[:, :, (dim_seq - 1):]
# give circulant matrix absolute position awareness
pos_x, pos_y = self.circulant_pos_x, self.circulant_pos_y
weight = weight * rearrange(pos_x, 'h i -> h i ()') * rearrange(pos_y, 'h j -> h () j')
if self.causal:
weight, bias = weight[:, :n, :n], bias[:, :n]
mask = torch.ones(weight.shape[-2:], device = device).triu_(1).bool()
mask = rearrange(mask, 'i j -> () i j')
weight = weight.masked_fill(mask, 0.)
gate = rearrange(gate, 'b n (h d) -> b h n d', h = h)
gate = einsum('b h n d, h m n -> b h m d', gate, weight)
gate = gate + rearrange(bias, 'h n -> () h n ()')
gate = rearrange(gate, 'b h n d -> b n (h d)')
if exists(gate_res):
gate = gate + gate_res
return self.act(gate) * res
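# in short: the input is split in half along the feature dimension; one half is
# layer-normed and mixed across the sequence by a learned (seq x seq) matrix
# (optionally causal / circulant), then used to gate the other half elementwise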
class gMLPBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_ff,
seq_len,
heads = 1,
attn_dim = None,
causal = False,
act = nn.Identity(),
circulant_matrix = False
):
super().__init__()
self.proj_in = nn.Sequential(
nn.Linear(dim, dim_ff),
nn.GELU()
)
self.attn = Attention(dim, dim_ff // 2, attn_dim, causal) if exists(attn_dim) else None
self.sgu = SpatialGatingUnit(dim_ff, seq_len, causal, act, heads, circulant_matrix = circulant_matrix)
self.proj_out = nn.Linear(dim_ff // 2, dim)
def forward(self, x):
gate_res = self.attn(x) if exists(self.attn) else None
x = self.proj_in(x)
x = self.sgu(x, gate_res = gate_res)
x = self.proj_out(x)
return x
# main classes
class gMLP(nn.Module):
def __init__(
self,
*,
num_tokens = None,
dim,
depth,
seq_len,
heads = 1,
ff_mult = 4,
attn_dim = None,
prob_survival = 1.,
causal = False,
circulant_matrix = False,
shift_tokens = 0,
act = nn.Identity()
):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by number of heads'
dim_ff = dim * ff_mult
self.seq_len = seq_len
self.prob_survival = prob_survival
self.to_embed = nn.Embedding(num_tokens, dim) if exists(num_tokens) else nn.Identity()
token_shifts = tuple(range(0 if causal else -shift_tokens, shift_tokens + 1))
self.layers = nn.ModuleList([Residual(PreNorm(dim, PreShiftTokens(token_shifts, gMLPBlock(dim = dim, heads = heads, dim_ff = dim_ff, seq_len = seq_len, attn_dim = attn_dim, causal = causal, act = act, circulant_matrix = circulant_matrix)))) for i in range(depth)])
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
) if exists(num_tokens) else nn.Identity()
def forward(self, x):
x = self.to_embed(x)
layers = self.layers if not self.training else dropout_layers(self.layers, self.prob_survival)
out = nn.Sequential(*layers)(x)
return self.to_logits(out)
class gMLPVision(nn.Module):
def __init__(
self,
*,
image_size,
patch_size,
num_classes,
dim,
depth,
heads = 1,
ff_mult = 4,
channels = 3,
attn_dim = None,
prob_survival = 1.
):
super().__init__()
assert (dim % heads) == 0, 'dimension must be divisible by number of heads'
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert (image_height % patch_height) == 0 and (image_width % patch_width) == 0, 'image height and width must be divisible by patch size'
num_patches = (image_height // patch_height) * (image_width // patch_width)
dim_ff = dim * ff_mult
self.to_patch_embed = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (c p1 p2)', p1 = patch_height, p2 = patch_width),
nn.Linear(channels * patch_height * patch_width, dim)
)
self.prob_survival = prob_survival
self.layers = nn.ModuleList([Residual(PreNorm(dim, gMLPBlock(dim = dim, heads = heads, dim_ff = dim_ff, seq_len = num_patches, attn_dim = attn_dim))) for i in range(depth)])
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
Reduce('b n d -> b d', 'mean'),
nn.Linear(dim, num_classes)
)
def forward(self, x):
x = self.to_patch_embed(x)
layers = self.layers if not self.training else dropout_layers(self.layers, self.prob_survival)
x = nn.Sequential(*layers)(x)
return self.to_logits(x)
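# minimal usage sketch (illustrative hyperparameters, not from the original repository)

if __name__ == '__main__':
    # causal language-model style usage
    lm = gMLP(num_tokens = 256, dim = 128, depth = 4, seq_len = 256, causal = True)
    tokens = torch.randint(0, 256, (1, 256))
    logits = lm(tokens)        # (1, 256, 256)

    # image classification style usage
    vision = gMLPVision(image_size = 64, patch_size = 8, num_classes = 10, dim = 128, depth = 4)
    images = torch.randn(1, 3, 64, 64)
    preds = vision(images)     # (1, 10)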
| g-mlp-pytorch-main | g_mlp_pytorch/g_mlp_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'charformer-pytorch',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Charformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/charformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'learned tokenization'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| charformer-pytorch-main | setup.py |
from charformer_pytorch.charformer_pytorch import GBST
| charformer-pytorch-main | charformer_pytorch/__init__.py |
import math
from math import gcd
import functools
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
# helpers
def exists(val):
return val is not None
def lcm(*numbers):
return int(functools.reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1))
def masked_mean(tensor, mask, dim = -1):
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor.masked_fill_(~mask, 0.)
total_el = mask.sum(dim = dim)
mean = tensor.sum(dim = dim) / total_el.clamp(min = 1.)
mean.masked_fill_(total_el == 0, 0.)
return mean
def next_divisible_length(seqlen, multiple):
return math.ceil(seqlen / multiple) * multiple
def pad_to_multiple(tensor, multiple, *, seq_dim, dim = -1, value = 0.):
seqlen = tensor.shape[seq_dim]
length = next_divisible_length(seqlen, multiple)
if length == seqlen:
return tensor
remainder = length - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value = value)
# helper classes
class Pad(nn.Module):
def __init__(self, padding, value = 0.):
super().__init__()
self.padding = padding
self.value = value
def forward(self, x):
return F.pad(x, self.padding, value = self.value)
class DepthwiseConv1d(nn.Module):
def __init__(self, dim_in, dim_out, kernel_size):
super().__init__()
self.conv = nn.Conv1d(dim_in, dim_out, kernel_size, groups = dim_in)
self.proj_out = nn.Conv1d(dim_out, dim_out, 1)
def forward(self, x):
x = self.conv(x)
return self.proj_out(x)
# main class
class GBST(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_block_size = None,
blocks = None,
downsample_factor = 4,
score_consensus_attn = True
):
super().__init__()
assert exists(max_block_size) ^ exists(blocks), 'either max_block_size or blocks are given on initialization'
self.token_emb = nn.Embedding(num_tokens, dim)
if exists(blocks):
assert isinstance(blocks, tuple), 'blocks must be a tuple of block sizes'
self.blocks = tuple(map(lambda el: el if isinstance(el, tuple) else (el, 0), blocks))
assert all([(offset < block_size) for block_size, offset in self.blocks]), 'offset must be always smaller than the block size'
max_block_size = max(list(map(lambda t: t[0], self.blocks)))
else:
self.blocks = tuple(map(lambda el: (el, 0), range(1, max_block_size + 1)))
self.pos_conv = nn.Sequential(
Pad((0, 0, 0, max_block_size - 1)),
Rearrange('b n d -> b d n'),
DepthwiseConv1d(dim, dim, kernel_size = max_block_size),
Rearrange('b d n -> b n d')
)
self.score_fn = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... () -> ...')
)
self.score_consensus_attn = score_consensus_attn
assert downsample_factor <= max_block_size, 'final downsample factor should be less than the maximum block size'
self.block_pad_multiple = lcm(*[block_size for block_size, _ in self.blocks])
self.downsample_factor = downsample_factor
def forward(self, x, mask = None):
b, n, block_mult, ds_factor, device = *x.shape, self.block_pad_multiple, self.downsample_factor, x.device
m = next_divisible_length(n, ds_factor)
# get character token embeddings
x = self.token_emb(x)
# do a conv to generate the positions for the tokens
x = self.pos_conv(x)
        # pad both sequence and mask to a length divisible by all block sizes, from 1 up to the max block size
x = pad_to_multiple(x, block_mult, seq_dim = 1, dim = -2)
if exists(mask):
mask = pad_to_multiple(mask, block_mult, seq_dim = 1, dim = -1, value = False)
# compute representations for all blocks by mean pooling
block_masks = []
block_reprs = []
for block_size, offset in self.blocks:
# clone the input sequence as well as the mask, in order to pad for offsets
block_x = x.clone()
if exists(mask):
block_mask = mask.clone()
# pad for offsets, if needed
need_padding = offset > 0
if need_padding:
left_offset, right_offset = (block_size - offset), offset
block_x = F.pad(block_x, (0, 0, left_offset, right_offset), value = 0.)
if exists(mask):
block_mask = F.pad(block_mask, (left_offset, right_offset), value = False)
# group input sequence into blocks
blocks = rearrange(block_x, 'b (n m) d -> b n m d', m = block_size)
# either mean pool the blocks, or do a masked mean
if exists(mask):
mask_blocks = rearrange(block_mask, 'b (n m) -> b n m', m = block_size)
block_repr = masked_mean(blocks, mask_blocks, dim = -2)
else:
block_repr = blocks.mean(dim = -2)
# append the block representations, as well as the pooled block masks
block_repr = repeat(block_repr, 'b n d -> b (n m) d', m = block_size)
if need_padding:
block_repr = block_repr[:, left_offset:-right_offset]
block_reprs.append(block_repr)
if exists(mask):
mask_blocks = torch.any(mask_blocks, dim = -1)
mask_blocks = repeat(mask_blocks, 'b n -> b (n m)', m = block_size)
if need_padding:
mask_blocks = mask_blocks[:, left_offset:-right_offset]
block_masks.append(mask_blocks)
# stack all the block representations
block_reprs = torch.stack(block_reprs, dim = 2)
# calculate scores and softmax across the block size dimension
scores = self.score_fn(block_reprs)
if exists(mask):
block_masks = torch.stack(block_masks, dim = 2)
max_neg_value = -torch.finfo(scores.dtype).max
scores = scores.masked_fill(~block_masks, max_neg_value)
scores = scores.softmax(dim = 2)
# do the cheap consensus attention, eq (5) in paper
if self.score_consensus_attn:
score_sim = einsum('b i d, b j d -> b i j', scores, scores)
if exists(mask):
cross_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
max_neg_value = -torch.finfo(score_sim.dtype).max
score_sim = score_sim.masked_fill(~cross_mask, max_neg_value)
score_attn = score_sim.softmax(dim = -1)
scores = einsum('b i j, b j m -> b i m', score_attn, scores)
# multiply the block representations by the position-wise scores
scores = rearrange(scores, 'b n m -> b n m ()')
x = (block_reprs * scores).sum(dim = 2)
# truncate to length divisible by downsample factor
x = x[:, :m]
if exists(mask):
mask = mask[:, :m]
# final mean pooling downsample
x = rearrange(x, 'b (n m) d -> b n m d', m = ds_factor)
if exists(mask):
mask = rearrange(mask, 'b (n m) -> b n m', m = ds_factor)
x = masked_mean(x, mask, dim = 2)
mask = torch.any(mask, dim = -1)
else:
x = x.mean(dim = -2)
return x, mask
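# minimal usage sketch (illustrative settings, not from the original repository):
# GBST takes byte / character level token ids and returns downsampled block
# embeddings together with the correspondingly downsampled mask

if __name__ == '__main__':
    tokenizer = GBST(
        num_tokens = 257,        # e.g. 256 byte values + 1 padding id (assumed)
        dim = 512,
        max_block_size = 4,
        downsample_factor = 4,
        score_consensus_attn = True
    )

    ids = torch.randint(0, 257, (1, 1024))
    mask = torch.ones(1, 1024).bool()
    embeds, downsampled_mask = tokenizer(ids, mask = mask)  # (1, 256, 512), (1, 256)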
| charformer-pytorch-main | charformer_pytorch/charformer_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'retrieval-augmented-ddpm',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Retrieval-Augmented Denoising Diffusion Probabilistic Models',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/retrieval-augmented-ddpm',
keywords = [
'artificial intelligence',
'deep learning',
'denoising diffusion',
'retrieval'
],
install_requires=[
'clip-retrieval',
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| retrieval-augmented-ddpm-main | setup.py |
 | retrieval-augmented-ddpm-main | retrieval_augmented_ddpm/retrieval_augmented_ddpm.py |
 | retrieval-augmented-ddpm-main | retrieval_augmented_ddpm/__init__.py |
import argparse
from pathlib import Path
from tqdm import tqdm
# torch
import torch
from einops import repeat
# vision imports
from PIL import Image
from torchvision.utils import make_grid, save_image
# dalle related classes and utils
from dalle_pytorch import __version__
from dalle_pytorch import DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE, DALLE
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, YttmTokenizer, ChineseTokenizer
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--dalle_path', type = str, required = True,
help='path to your trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--text', type = str, required = True,
help='your text prompt')
parser.add_argument('--num_images', type = int, default = 128, required = False,
help='number of images')
parser.add_argument('--batch_size', type = int, default = 4, required = False,
help='batch size')
parser.add_argument('--top_k', type = float, default = 0.9, required = False,
help='top k filter threshold')
parser.add_argument('--outputs_dir', type = str, default = './outputs', required = False,
help='output directory')
parser.add_argument('--bpe_path', type = str,
help='path to your huggingface BPE json file')
parser.add_argument('--hug', dest='hug', action = 'store_true')
parser.add_argument('--chinese', dest='chinese', action = 'store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--gentxt', dest='gentxt', action='store_true')
args = parser.parse_args()
# helper fns
def exists(val):
return val is not None
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# load DALL-E
dalle_path = Path(args.dalle_path)
assert dalle_path.exists(), 'trained DALL-E must exist'
load_obj = torch.load(str(dalle_path))
dalle_params, vae_params, weights, vae_class_name, version = load_obj.pop('hparams'), load_obj.pop('vae_params'), load_obj.pop('weights'), load_obj.pop('vae_class_name', None), load_obj.pop('version', None)
# friendly print
if exists(version):
print(f'Loading a model trained with DALLE-pytorch version {version}')
else:
print('You are loading a model trained on an older version of DALL-E pytorch - it may not be compatible with the most recent version')
# load VAE
if args.taming:
vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)
elif vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
vae = OpenAIDiscreteVAE()
assert not (exists(vae_class_name) and vae.__class__.__name__ != vae_class_name), f'you trained DALL-E using {vae_class_name} but are trying to generate with {vae.__class__.__name__} - please make sure you are passing in the correct paths and settings for the VAE to use for generation'
# reconstitute DALL-E
dalle = DALLE(vae = vae, **dalle_params).cuda()
dalle.load_state_dict(weights)
# generate images
image_size = vae.image_size
texts = args.text.split('|')
for j, text in tqdm(enumerate(texts)):
if args.gentxt:
text_tokens, gen_texts = dalle.generate_texts(tokenizer, text=text, filter_thres = args.top_k)
text = gen_texts[0]
else:
text_tokens = tokenizer.tokenize([text], dalle.text_seq_len).cuda()
text_tokens = repeat(text_tokens, '() n -> b n', b = args.num_images)
outputs = []
for text_chunk in tqdm(text_tokens.split(args.batch_size), desc = f'generating images for - {text}'):
output = dalle.generate_images(text_chunk, filter_thres = args.top_k)
outputs.append(output)
outputs = torch.cat(outputs)
# save all images
file_name = text
outputs_dir = Path(args.outputs_dir) / file_name.replace(' ', '_')[:(100)]
outputs_dir.mkdir(parents = True, exist_ok = True)
for i, image in tqdm(enumerate(outputs), desc = 'saving images'):
save_image(image, outputs_dir / f'{i}.png', normalize=True)
with open(outputs_dir / 'caption.txt', 'w') as f:
f.write(file_name)
print(f'created {args.num_images} images at "{str(outputs_dir)}"')
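# example invocation (paths and prompt are placeholders):
# $ python generate.py --dalle_path ./dalle.pt --text "a cloud in the shape of a cat" --num_images 8 --batch_size 4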
| DALLE-pytorch-main | generate.py |