python_code (stringlengths 0–1.02M) | repo_name (stringlengths 9–48) | file_path (stringlengths 5–114)
---|---|---|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
# classes
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 64,
and_self_attend = False
):
super().__init__()
inner_dim = heads * dim_head
self.heads = heads
self.scale = dim_head ** -0.5
self.and_self_attend = and_self_attend
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(
self,
x,
context,
mask = None
):
h, scale = self.heads, self.scale
if self.and_self_attend:
context = torch.cat((x, context), dim = -2)
if exists(mask):
mask = F.pad(mask, (x.shape[-2], 0), value = True)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
dots = einsum('b h i d, b h j d -> b h i j', q, k) * scale
if exists(mask):
mask_value = -torch.finfo(dots.dtype).max
mask = rearrange(mask, 'b n -> b 1 1 n')
dots.masked_fill_(~mask, mask_value)
attn = dots.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)', h = h)
return self.to_out(out)
class ISAB(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
num_latents = None,
latent_self_attend = False
):
super().__init__()
self.latents = nn.Parameter(torch.randn(num_latents, dim)) if exists(num_latents) else None
self.attn1 = Attention(dim, heads, and_self_attend = latent_self_attend)
self.attn2 = Attention(dim, heads)
def forward(self, x, latents = None, mask = None):
b, *_ = x.shape
assert exists(latents) ^ exists(self.latents), 'you can only either learn the latents within the module, or pass it in externally'
latents = latents if exists(latents) else self.latents
if latents.ndim == 2:
latents = repeat(latents, 'n d -> b n d', b = b)
latents = self.attn1(latents, x, mask = mask)
out = self.attn2(x, latents)
return out, latents
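# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a minimal smoke test of
# the ISAB block above with learned latents. The batch size, sequence length
# and `dim` below are illustrative assumptions, not values from the repo.
if __name__ == "__main__":
    isab = ISAB(dim=64, heads=8, num_latents=16)
    x = torch.randn(2, 128, 64)                  # (batch, seq_len, dim)
    mask = torch.ones(2, 128, dtype=torch.bool)  # attend to every position
    out, latents = isab(x, mask=mask)
    print(out.shape, latents.shape)              # (2, 128, 64) and (2, 16, 64)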
| isab-pytorch-main | isab_pytorch/isab_pytorch.py |
from isab_pytorch.isab_pytorch import ISAB
| isab-pytorch-main | isab_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'rgn2-replica',
packages = find_packages(),
version = '0.0.1',
license='CreativeCommons4.0',
description = 'RGN2-REPLICA: Replicating a SoTA model for Protein Folding with no homologs (wip)',
author = 'Eric Alcaide',
author_email = 'ericalcaide1@gmail.com',
url = 'https://github.com/hypnopump/rgn2-replica',
keywords = [
'artificial intelligence',
'bioinformatics',
'protein folding',
'protein structure prediction'
],
install_requires=[
'einops>=0.3',
'numpy',
'torch>=1.6',
'sidechainnet',
'proDy',
'tqdm',
'mp-nerf',
'datasets>=1.10',
'transformers>=4.2',
'x-transformers>=0.16.1',
'pytorch-lightning>=1.4',
'wandb',
'fair-esm>=0.4.0'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Programming Language :: Python :: 3.7',
],
)
| rgn2-replica-main | setup.py |
import torch
import numpy as np
from rgn2_replica.utils import *
from rgn2_replica.lm_utils import *
def test_chunk_permute():
seq = torch.tensor([1,2,3,4,5,6,7,8,9]*3)
res = chunk_permute(seq)
assert True
def test_masked_lang():
seq = torch.tensor([1,2,3,4,5,6,7,8,9]*3)
res = masked_lang(seq)
assert True
def test_mask_seq():
seq = torch.tensor([1,2,3,4,5,6,7,8,9]*3)
res = mask_seq(seq)
    assert True
| rgn2-replica-main | tests/test_utils.py |
import torch
from rgn2_replica.losses import AminoBERTLoss, GoPFAMLoss, LossWrapper
def test_aminobert_loss():
vocab_size = 24
bs = 20
logit_out = torch.rand(bs, 10, vocab_size)
logit_chunk_perm = torch.rand(bs, 2)
target = torch.randint(1, 20, (bs, 10))
chunk_perm = torch.randint(0, 2, (bs,))
loss_func = AminoBERTLoss(vocab_size=vocab_size)
loss = loss_func(logit_out, logit_chunk_perm, target, chunk_perm)
assert True
def test_go_pfam_loss():
num_classes = 45
bs = 40
target_pfam = torch.randint(0, num_classes, (bs, num_classes)).float()
target_go = torch.randint(0, num_classes, (bs, num_classes)).float()
logit_pfam = torch.rand(bs, num_classes)
logit_go = torch.rand(bs, num_classes)
weights = [2, 3]
loss_func = GoPFAMLoss(weights=weights)
loss = loss_func(
logit_go=logit_go,
logit_pfam=logit_pfam,
target_go=target_go,
target_pfam=target_pfam,
)
assert True
def test_loss_wrapper():
num_classes = 45
bs = 40
target_pfam = torch.randint(0, num_classes, (bs, num_classes)).float()
target_go = torch.randint(0, num_classes, (bs, num_classes)).float()
logit_pfam = torch.rand(bs, num_classes)
logit_go = torch.rand(bs, num_classes)
weights = [2, 0.6]
loss_go_pfam = GoPFAMLoss(weights=weights)
vocab_size = 24
logit_out = torch.rand(bs, 10, vocab_size)
logit_chunk_perm = torch.rand(bs, 2)
target = torch.randint(1, 20, (bs, 10))
chunk_perm = torch.randint(0, 2, (bs,))
loss_lm = AminoBERTLoss(vocab_size=vocab_size)
combine_loss = LossWrapper(lm_loss=loss_lm, aux_loss=loss_go_pfam, weights=[2, 3])
loss = combine_loss(
logit_out=logit_out,
logit_chunk_perm=logit_chunk_perm,
logit_go=logit_go,
logit_pfam=logit_pfam,
target_go=target_go,
target_pfam=target_pfam,
chunk_perm=chunk_perm,
target=target,
)
assert True
| rgn2-replica-main | tests/test_loss.py |
# Author: Eric Alcaide
import os
import sys
if __name__ == "__main__":
pass
| rgn2-replica-main | tests/tests.py |
# Author: Eric Alcaide ( @hypnopump )
import time
import gc
import random
import numpy as np
import torch
from einops import rearrange, repeat
from functools import partial
import mp_nerf
from rgn2_replica.rgn2 import *
from rgn2_replica.utils import *
from rgn2_replica.rgn2_utils import *
# logging
WANDB = True
try:
import wandb
except:
WANDB = False
print("Failed to import wandb. LOGGING NOT AVAILABLE")
def batched_inference(*args, model, embedder,
mode="test", device="cpu", recycle_func=lambda x: 1):
""" Inputs:
* args: iterable of outputs from mp_nerf.utils.get_prot()
( seq, int_seq, true_coords, angles, padding_seq, mask, pid )
* model: torch.nn.Module / extension. model to do inference on
        * embedder: func generator of NLP embeddings or torch.nn.Embedding
        * mode: str. one of ["train", "test", "fast_test"]. For either next pred, or full pred
        * device: str or torch.device
        * recycle_func: func. returns number of recycling iters
Outputs:
        * generator of per-protein output dicts (see the return statement below)
"""
    # batch tokens, masks and send to device - Out: (B, L)
batch_dim = len(args)
max_seq_len = max(x[1].shape[-1] for x in args)
# create scaffolds
int_seq = torch.ones(batch_dim, max_seq_len, dtype=torch.long) * 20 # padding tok
# mask is true mask. long mask is for lstm
mask, long_mask = torch.zeros(2, *int_seq.shape, dtype=torch.bool)
true_coords = torch.zeros(int_seq.shape[0], int_seq.shape[1]*14, 3, device=device)
# fill scaffolds
for i,arg in enumerate(args):
mask[i, :arg[1].shape[-1]] = arg[-2]
long_mask[i, :arg[1].shape[-1]] = True
int_seq[i, :arg[1].shape[-1]] = arg[1]
true_coords[i, :arg[1].shape[-1]*14] = arg[2]
mask = mask.bool().to(device)
coords = rearrange(true_coords, 'b (l c) d -> b l c d', c=14)
ca_trace = coords[..., 1, :]
coords_rebuilt = mp_nerf.proteins.ca_bb_fold( ca_trace ) # beware extremes
# calc angle labels
angles_label_ = torch.zeros(*ca_trace.shape[:-1], 2, dtype=torch.float, device=device)
angles_mask_ = torch.zeros_like(angles_label_).bool() # propagate mask to angles w/ missing points
for i, arg in enumerate(args):
length = arg[1].shape[-1]
angles_label_[i, 1:length-1, 0] = mp_nerf.utils.get_cosine_angle(
ca_trace[i, :length-2 , :],
ca_trace[i, 1:length-1, :],
ca_trace[i, 2:length , :],
)
angles_label_[i, 2:length-1, 1] = mp_nerf.utils.get_dihedral(
ca_trace[i, :length-3 , :],
ca_trace[i, 1:length-2, :],
ca_trace[i, 2:length-1, :],
ca_trace[i, 3:length , :],
)
angles_mask_[i, 1:length-1, 0] = (
mask[i, :length-2] * mask[i, 1:length-1] * mask[i, 2:length]
)
        angles_mask_[i, 2:length-1, 1] = (
mask[i, :length-3] * mask[i, 1:length-2] * mask[i, 2:length-1] * mask[i, 3:length]
)
# replace nan and (angles whose coords are not fully known) by 0.
# later don't count them
angles_label_[~angles_mask_] = 0.
angles_label_[angles_label_ != angles_label_] = 0.
points_label = mp_nerf.ml_utils.angle_to_point_in_circum(angles_label_) # (B, L, 2, 2)
# include angles of previous AA as input
points_input = points_label.clone()
points_input = torch.cat([
points_input[..., :1, :, :], points_input[..., :-1, :, :]
], dim=-3)
angles_input = rearrange(points_input, "... c d -> ... (c d)")
# EMBEDD
if isinstance(embedder, torch.nn.Embedding):
embedds = embedder(int_seq.to(device))
else:
embedds = embedder(int_seq)
embedds = torch.cat([
embedds,
# don't pass angles info - just 0 at start (sin=0, cos=1)
torch.zeros_like(angles_input) + angles_input[:, :1],
], dim=-1)
# PREDICT
if mode == "train":
# get angles
preds, r_iters = model.forward(embedds, mask=long_mask,
recycle=recycle_func(None)) # (B, L, 4)
elif mode == "test":
preds, r_iters = model.predict_fold(embedds, mask=long_mask,
recycle=recycle_func(None)) # (B, L, 4)
elif mode == "fast_test":
embedds[:, :, -4:] = embedds[:, :, -4:] * 0. # zero out angle features
preds, r_iters = model.forward(embedds, mask=long_mask,
recycle=recycle_func(None)) # , inter_recycle=True
points_preds = rearrange(preds, '... (a d) -> ... a d', a=2) # (B, L, 2, 2)
# POST-PROCESS
points_preds, ca_trace_pred, frames_preds, wrapper_pred = pred_post_process(
points_preds, mask=long_mask
)
    # get frames for later FAPE
bb_ca_trace_rebuilt, frames_labels = mp_nerf.proteins.ca_from_angles(
points_label.reshape(points_label.shape[0], -1, 4) # (B, L, 2, 2) -> (B, L, 4)
)
return (
{
"seq": arg[0],
"int_seq": arg[1],
"angles": arg[2],
"padding_seq": arg[3],
"mask": arg[5].bool(),
"long_mask": long_mask[i, :arg[1].shape[-1]],
"pid": arg[6],
# labels
"true_coords": true_coords[i:i+1, :arg[1].shape[-1]*14], # (1, (L C), 3)
"coords": coords[i:i+1, :arg[1].shape[-1]], # (1, L, C, 3)
"ca_trace": ca_trace[i:i+1, :arg[1].shape[-1]], # (1, L, 3)
"angles_label": angles_label_[i:i+1, :arg[1].shape[-1]], # (1, L, 2)
"points_label": points_label[i:i+1, :arg[1].shape[-1]], # (1, L, 2, 2)
"frames_labels": frames_labels[i, :arg[1].shape[-1]], # (L, 3, 3)
# inputs
"points_input": angles_input[i:i+1, :arg[1].shape[-1]], # (1, L, 4)
# preds
"wrapper_pred": wrapper_pred[i:i+1, :arg[1].shape[-1]], # (1, L, C, 3)
"ca_trace_pred": ca_trace_pred[i:i+1, :arg[1].shape[-1]],# (1, L, C, 3)
"points_preds": points_preds[i:i+1, :arg[1].shape[-1]], # (1, L, 4)
"frames_preds": frames_preds[i, :arg[1].shape[-1]], # (L, 3, 3)
# (iters, L, 4) - only if available
"r_iters": r_iters[i, :, :arg[1].shape[-1]] if len(r_iters.shape) > 2 else r_iters[i],
} for i,arg in enumerate(args)
)
def inference(*args, model, embedder,
mode="train", device="cpu", recycle_func=lambda x: 1):
""" Inputs:
* args: output from mp_nerf.utils.get_prot()
* model: torch.nn.Module / extension. model to do inference on
        * embedder: func generator of NLP embeddings or torch.nn.Embedding
* mode: str. ["train", "test", "fast_test"]. For either next pred,
or full pred. "test" does AR, "fast_test" does iterative
refinement (good approximation and 10x faster)
* device: str or torch.device
        * recycle_func: func. returns number of recycling iters
Outputs:
* output_dict
"""
seq, int_seq, true_coords, angles, padding_seq, mask, pid = args
int_seq = int_seq.unsqueeze(0)
mask = mask.bool().to(device)
long_mask = torch.ones_like(mask)
coords = rearrange(true_coords, '(l c) d -> () l c d', c=14).to(device)
ca_trace = coords[..., 1, :]
coords_rebuilt = mp_nerf.proteins.ca_bb_fold( ca_trace )
# mask for thetas and chis
angles_label_ = torch.zeros(*ca_trace.shape[:-1], 2, dtype=torch.float, device=device)
angles_mask_ = torch.zeros_like(angles_label_).bool()
angles_label_[..., 1:-1, 0] = mp_nerf.utils.get_cosine_angle(
ca_trace[..., :-2 , :],
ca_trace[..., 1:-1, :],
ca_trace[..., 2: , :],
)
angles_label_[..., 2:-1, 1] = mp_nerf.utils.get_dihedral(
ca_trace[..., :-3 , :],
ca_trace[..., 1:-2, :],
ca_trace[..., 2:-1, :],
ca_trace[..., 3: , :],
)
    angles_mask_[..., 1:-1, 0] = (
        mask[:-2] * mask[1:-1] * mask[2:]
    )
    angles_mask_[..., 2:-1, 1] = (
        mask[:-3] * mask[1:-2] * mask[2:-1] * mask[3:]
    )
# replace nan and (angles whose coords are not fully known) by 0.
angles_label_[~angles_mask_] = 0.
angles_label_[angles_label_ != angles_label_] = 0.
points_label = mp_nerf.ml_utils.angle_to_point_in_circum(angles_label_) # (B, L, 2, 2)
# include angles of previous AA as input
points_input = points_label.clone()
points_input = torch.cat([
points_input[..., :1, :, :], points_input[..., :-1, :, :]
], dim=-3)
angles_input = rearrange(points_input, "... c d -> ... (c d)")
# PREDICT
if isinstance(embedder, torch.nn.Embedding):
embedds = embedder(int_seq.to(device))
else:
embedds = embedder(int_seq)
embedds = torch.cat([
embedds,
# don't pass angles info - just 0 at start (sin=0, cos=1)
torch.zeros_like(angles_input) + angles_input[:, :1],
], dim=-1)
if mode == "train":
preds, r_iters = model.forward(embedds, mask=long_mask,
recycle=recycle_func(None)) # (B, L, 4)
elif mode == "test":
preds, r_iters = model.predict_fold(embedds, mask=long_mask,
recycle=recycle_func(None)) # (B, L, 4)
elif mode == "fast_test":
embedds[:, :, -4:] = embedds[:, :, -4:] * 0. # zero out angle features
preds, r_iters = model.forward(embedds, mask=long_mask,
recycle=recycle_func(None)) # , inter_recycle=True
points_preds = rearrange(preds, '... (a d) -> ... a d', a=2) # (B, L, 2, 2)
# post-process
points_preds, ca_trace_pred, frames_preds, wrapper_pred = pred_post_process(
points_preds, mask=long_mask
)
    # get frames for later FAPE
bb_ca_trace_rebuilt, frames_labels = mp_nerf.proteins.ca_from_angles(
points_label.reshape(1, -1, 4) # (B, L, 2, 2) -> (B, L, 4)
)
return {
"seq": seq,
"int_seq": int_seq,
"angles": angles,
"padding_seq": padding_seq,
"mask": mask,
"long_mask": long_mask,
"pid": pid,
# labels
"true_coords": true_coords, # (B, (L C), 3)
"coords": coords, # (B, L, C, 3)
"ca_trace": ca_trace,
"angles_label": angles_label_, # (L, 2)
"points_label": points_label,
"frames_labels": frames_labels, # (L, 3, 3)
# inputs
"points_input": angles_input,
# preds
"wrapper_pred": wrapper_pred, # (1, L, C, 3)
"ca_trace_pred": ca_trace_pred, # (1, L, C, 3)
"points_preds": points_preds,
"frames_preds": frames_preds, # (L, 3, 3)
"r_iters": r_iters,
}
def predict(get_prot_, steps, model, embedder, return_preds=True,
accumulate_every=1, log_every=None, seed=None, wandbai=False,
recycle_func=lambda x: 1, mode="test"):
""" Performs a batch prediction.
Can return whole list of preds or just metrics.
Inputs:
* get_prot_: mp_nerf.utils.get_prot() iterator
* steps: int. number of steps to predict
* model: torch model
* embedder: callable to get NLP embeddings from
        * return_preds: bool. whether to return predictions as well
        * accumulate_every: int. batch size.
        * log_every: int or None. print on screen every X batches.
        * seed: int or None. random seed for reproducibility.
        * wandbai: bool. whether to log metrics to W&B.
        * recycle_func: func. number of recycle iters per sample
        * mode: str. one of "test" (AR prediction) or "fast_test"
          (good on-the-ground approximation if recycle ~ 10).
Outputs:
* preds_list: (steps, dict) list
* metrics_list: (steps, dict) list
"""
model = model.eval()
device = next(model.parameters()).device
preds_list, metrics_list = [], []
b = 0
tic = time.time()
while b < (steps//accumulate_every):
if b == 0 and seed is not None:
set_seed(seed)
# get + predict
with torch.no_grad():
prots = [ next(get_prot_) for i in range(accumulate_every) ]
infer_batch = batched_inference(
*prots,
model=model, embedder=embedder,
mode=mode, device=device, recycle_func=recycle_func
)
# calculate metrics || calc loss terms || baselines for next-term: torsion=2, fape=0.95
for infer in infer_batch:
# discard 0. angles (result of unknown coord, padding, etc)
angle_mask = infer["angles_label"] != 0.
torsion_loss = mp_nerf.ml_utils.torsion_angle_loss(
pred_points=infer["points_preds"][angle_mask].reshape(1, -1, 1, 2), # (B, no_pad_among(L*2), 1, 2)
true_points=infer["points_label"][angle_mask].reshape(1, -1, 1, 2), # (B, no_pad_among(L*2), 1, 2)
)
            # violation loss between C-alphas - L1
dist_mat = mp_nerf.utils.cdist(infer["wrapper_pred"][:, :, 1],
infer["wrapper_pred"][:, :, 1],) # B, L, L
dist_mat[:, np.arange(dist_mat.shape[-1]), np.arange(dist_mat.shape[-1])] = 5.
viol_loss = -(dist_mat - 3.78).clamp(min=-np.inf, max=0.)
# calc metrics
log_dict = {
"torsion_loss": torsion_loss.mean().item(),
"viol_loss": viol_loss.mean().item()
}
metrics = mp_nerf.proteins.get_protein_metrics(
true_coords=infer["coords"][:, infer["mask"]],
pred_coords=infer["ca_trace_pred"][:, infer["mask"]],
detach=True
)
log_dict.update({
k:v.mean().item() for k,v in metrics.items() if "wrap" not in k
})
# record
metrics_list.append( log_dict )
if wandbai and WANDB:
wandb.log(metrics_list[-1])
if return_preds:
# pass all to cpu - free mem in the gpu
for k,v in infer.items():
if isinstance(v, torch.Tensor):
infer[k] = v.cpu()
preds_list.append( infer )
# free mem - slow
del infer
gc.collect()
# log
if log_every and (b-1) % log_every == 0:
tac = time.time()
print("Batch {0}/{1}, metrics_last_ex = {2}. Took: {3} seconds".format(
(b-1) // log_every, steps // log_every, log_dict, np.round(tac-tic, decimals=3)
))
tic = tac
        # go to next batch
b += 1
metrics_stats = { "eval_"+k : \
np.mean([ metrics[k] for metrics in metrics_list ]) \
for k in metrics_list[0].keys()
}
return preds_list, metrics_list, metrics_stats
def train(get_prot_, steps, model, embedder, optim, loss_f=None,
clip=None, accumulate_every=1, log_every=None, seed=None, wandbai=False,
recycle_func=lambda x: 1):
""" Performs a batch prediction.
Can return whole list of preds or just metrics.
Inputs:
* get_prot_: mp_nerf.utils.get_prot() iterator
* steps: int. number of steps to predict
* embedder: callable to get NLP embeddings from
* optim: torch.Opim for training.
* loss_f: str or None. custom expression for the loss
* clip: float or None. Gradient clipping
* accumulate_every: int. effective batch size for backprop
* log_every: int or None. print every X number of batches.
* seed: int or None.
* wandbai: bool. whether to log to W&B
* recycle_func: func. number of recycle iters per sample
        Outputs:
        * metrics_list: (steps, dict) list
        * metrics_stats: dict of metrics averaged over the run
"""
model = model.train()
device = next(model.parameters()).device
metrics_list = []
b = 0
loss = 0.
tic = time.time()
while b < (steps//accumulate_every): # steps: #
if b == 0 and seed is not None:
set_seed(seed)
# get data + predict
prots = [ next(get_prot_) for i in range(accumulate_every) ]
infer_batch = batched_inference(
*prots,
model=model, embedder=embedder,
mode="train", device=device, recycle_func=recycle_func
)
# calculate metrics
loss_batch = 0
for i, infer in enumerate(infer_batch):
# calc loss terms
angle_mask = infer["angles_label"] != 0.
torsion_loss = mp_nerf.ml_utils.torsion_angle_loss(
pred_points=infer["points_preds"][angle_mask].reshape(1, -1, 1, 2), # (B, no_pad_among(L*2), 1, 2)
true_points=infer["points_label"][angle_mask].reshape(1, -1, 1, 2), # (B, no_pad_among(L*2), 1, 2)
)
            # violation loss between C-alphas - L1
dist_mat = mp_nerf.utils.cdist(infer["wrapper_pred"][:, :, 1],
infer["wrapper_pred"][:, :, 1],) # B, L, L
dist_mat[:, np.arange(dist_mat.shape[-1]), np.arange(dist_mat.shape[-1])] = 5.
viol_loss = -(dist_mat - 3.78).clamp(min=-np.inf, max=0.)
# calc metrics
log_dict = {
"torsion_loss": torsion_loss.mean().item(),
"viol_loss": viol_loss.mean().item()
}
metrics = mp_nerf.proteins.get_protein_metrics(
true_coords=infer["coords"][:, infer["mask"]],
pred_coords=infer["ca_trace_pred"][:, infer["mask"]],
detach=False
)
log_dict.update({k:v.mean().item() for k,v in metrics.items() if "wrap" not in k})
# calc loss
prev_loss = loss.item() if isinstance(loss, torch.Tensor) else loss
if isinstance(loss_f, str):
loss += eval(loss_f)
else:
loss += torsion_loss.mean() + metrics["drmsd"].mean() # +
# record
log_dict["loss"] = loss.item() - prev_loss
metrics_list.append( log_dict )
if wandbai and WANDB:
wandb.log(metrics_list[-1])
# clip gradients - p.44 AF2 methods section
# update weights
(loss/accumulate_every).mean().backward() # retain_graph=True
if clip:
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
optim.step()
optim.zero_grad()
loss = 0.
# log
        if log_every and (b-1) % log_every == 0:
tac = time.time()
print("Batch {0}/{1}, metrics_last_ex = {2}. Took: {3} seconds".format(
(b-1), # // accumulate_every,
steps // accumulate_every,
{k: np.mean([x[k] for x in metrics_list[-log_every:]]) \
for k in log_dict.keys()},
np.round(tac-tic, decimals=3)
))
tic = tac
        # go to next batch
b += 1
metrics_stats = { "train_"+k : \
np.mean([ metrics[k] for metrics in metrics_list ]) \
for k in metrics_list[0].keys()
}
return metrics_list, metrics_stats
#############################
## MAKING REAL PREDICTIONS ##
#############################
def infer_from_seqs(seq_list, model, embedder,
recycle_func=lambda x: 10, device="cpu"):
""" Infers structures for a sequence of proteins.
Inputs:
* seq_list: list of str. Protein sequences in FASTA format
* model: torch.nn.Module pytorch model
* embedder: torch.nn.Module pytorch model.
* recycle_func: func -> int. number of recycling iterations. a lower value
makes prediction faster. Past 10, improvement is marginal.
* device: str or torch.device. Device for inference. CPU is slow.
Outputs: dict of
* coords: list of torch.FloatTensor. Each of shape (L, 14, 3)
* int_seq: list of torch.LongTensor. Each of shape (L, )
"""
batch_dim = len(seq_list)
lengths = [len(x) for x in seq_list]
max_seq_len = max(lengths)
# group in batch - init tokens to padding tok
int_seq = torch.ones(batch_dim, max_seq_len, dtype=torch.long)*21
for i, seq in enumerate(seq_list):
int_seq[i, :lengths[i]] = torch.tensor([
mp_nerf.kb_proteins.AAS2INDEX[aa] for aa in seq
])
int_seq = int_seq.to(device)
mask = int_seq != 21 # tokens to predict
# get embeddings
if isinstance(embedder, torch.nn.Embedding):
embedds = embedder(int_seq.to(device))
else:
embedds = embedder(int_seq)
embedds = torch.cat([
embedds,
torch.zeros_like(embedds[..., -4:])
], dim=-1)
# don't pass angles info - just 0 at start (sin=0, cos=1)
    embedds[:, :, [-3, -1]] = 1.
# pred
with torch.no_grad():
preds, r_iters = model.forward(embedds, mask=None, recycle=recycle_func(None))
points_preds = rearrange(preds, '... (a d) -> ... a d', a=2) # (B, L, 2, 2)
# POST-PROCESS
points_preds, ca_trace_pred, frames_preds, wrapper_pred = pred_post_process(
points_preds, seq_list=seq_list, mask=mask
)
return {
# (L, 14, 3)
"coords": [ wrapper_pred[i, :lengths[i]] for i in range(batch_dim) ],
# (L, )
"int_seq": [ int_seq[i, :lengths[i]] for i in range(batch_dim) ],
# (L, 2, 2)
"points_preds": [ points_preds[i:i+1, :lengths[i]] for i in range(batch_dim) ],
# (L, 3, 3)
"frames_preds": [ frames_preds[i, :lengths[i]] for i in range(batch_dim)] ,
}
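# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). `recycle_func` above is
# expected to be a callable returning the number of recycling iterations: a
# fixed count is typical for inference and a random draw for training. Model
# and embedder construction is omitted here because it depends on a trained
# checkpoint; the commented call only illustrates the intended interface.
if __name__ == "__main__":
    eval_recycle = lambda _: 10                      # default used by infer_from_seqs
    train_recycle = lambda _: random.randint(1, 4)   # illustrative training-time choice
    print(eval_recycle(None), train_recycle(None))
    # out = infer_from_seqs(["MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"],
    #                       model=model, embedder=embedder,
    #                       recycle_func=eval_recycle, device="cuda")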
| rgn2-replica-main | rgn2_replica/rgn2_trainers.py |
from rgn2_replica.rgn2 import *
from rgn2_replica.rgn2_utils import *
from rgn2_replica.utils import *
| rgn2-replica-main | rgn2_replica/__init__.py |
# Author: Gurvinder Singh (@gurvindersingh)
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import get_linear_schedule_with_warmup
from x_transformers import Encoder, TransformerWrapper
from transformers.optimization import AdamW
from .losses import GoPFAMLoss
from .tokenizer import Tokenizer
from .dataset import ProteinLMDataModule
class ProteinLMModel(pl.LightningModule):
def __init__(
self,
vocab_size,
pfam_size,
go_size,
lr,
warmup_steps=2000,
wd=0.0,
bsize=4,
gpus=1,
epochs=4,
max_len=1024,
dim=512,
depth=8,
heads=8,
ff_glu=True,
use_rmsnorm=True,
rotary_pos_emb=True,
ff_dropout=0.0,
):
super().__init__()
self.save_hyperparameters()
base_model = TransformerWrapper(
num_tokens=vocab_size,
max_seq_len=max_len,
attn_layers=Encoder(
dim=dim,
depth=depth,
heads=heads,
ff_glu=ff_glu,
use_rmsnorm=use_rmsnorm,
rotary_pos_emb=rotary_pos_emb,
ff_dropout=ff_dropout,
)
)
pfam_head = ClassificationHead(dim, pfam_size)
go_head = ClassificationHead(dim, go_size)
self.model = nn.Sequential(
base_model,
go_head,
pfam_head,
)
def forward(self, x):
return self.model(x)
def process(self, batch):
x, mask, labels, pfam_labels, go_labels = (
batch["input_ids"],
batch["attention_mask"].bool(),
batch["labels"],
batch["pfam_labels"],
batch["go_labels"],
)
embs = self.model[0](x, mask=mask, return_embeddings=True)
logits_mlm = self.model[0].to_logits(embs)
logits_go = self.model[1](embs)
logits_pfam = self.model[2](embs)
mlm_loss_fct = nn.CrossEntropyLoss()
gopfam_loss_fct = GoPFAMLoss()
mlm_loss = mlm_loss_fct(logits_mlm.view(-1, self.hparams.vocab_size), labels.view(-1))
gopfam_loss = gopfam_loss_fct(logits_go, logits_pfam, go_labels, pfam_labels)
return mlm_loss + gopfam_loss
def training_step(self, batch, batch_idx):
loss = self.process(batch)
self.log_dict(
{"train_loss": loss},
on_epoch=True,
prog_bar=True,
sync_dist=True,
)
return loss
def validation_step(self, batch, batch_idx):
loss = self.process(batch)
self.log_dict(
{"valid_loss": loss},
on_step=True,
sync_dist=True,
prog_bar=True,
)
return loss
def setup(self, stage):
if stage == "fit":
# Get dataloader by calling it - train_dataloader() is called after setup() by default
trainLoader = self.train_dataloader()
# Calculate total steps
self.total_steps = (
(
len(trainLoader.dataset)
// (self.hparams.bsize * max(1, self.hparams.gpus))
)
// 1
* float(self.hparams.epochs)
)
def configure_optimizers(self):
"Prepare optimizer and schedule (linear warmup and decay)"
model = self.model
no_decay = ["bias", "norm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.hparams.wd,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.hparams.lr,
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.warmup_steps,
num_training_steps=self.total_steps,
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
class ClassificationHead(nn.Module):
def __init__(self, inpDim, outDim, dropoutProb=0.1):
super().__init__()
self.dropout = nn.Dropout(dropoutProb)
self.cls = nn.Linear(inpDim, outDim)
def forward(self, states):
x = states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.cls(x)
        return x
| rgn2-replica-main | rgn2_replica/lmmodel.py |
import argparse
from typing import Optional
import pyrosetta
from tqdm import tqdm
"""
NOTE: Remember to initialize PyRosetta before using these functions
Example:
import pyrosetta
pyrosetta.init("-mute all")
If you need to see Rosetta outputs, remove '-mute all'
"""
def get_fa_min_mover(
max_iter: int = 1000) -> pyrosetta.rosetta.protocols.moves.Mover:
""" Create full-atom minimization mover
Inputs:
* max_iter: int. Maximum number of iterations for MinMover
"""
# Create full-atom score function with terms for fixing bad bond lengths
sf = pyrosetta.create_score_function('ref2015_cst')
sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.cart_bonded, 1)
sf.set_weight(pyrosetta.rosetta.core.scoring.ScoreType.pro_close, 0)
# Allow movement of backbone, side chains, and chain breaks
mmap = pyrosetta.rosetta.core.kinematics.MoveMap()
mmap.set_bb(True)
mmap.set_chi(True)
mmap.set_jump(True)
# Create MinMover acting in cartesian space
min_mover = pyrosetta.rosetta.protocols.minimization_packing.MinMover(
mmap, sf, 'lbfgs_armijo_nonmonotone', 0.0001, True)
min_mover.max_iter(max_iter)
min_mover.cartesian(True)
return min_mover
def get_fa_relax_mover(
max_iter: int = 200, coord_constraint: float = 0.5
) -> pyrosetta.rosetta.protocols.moves.Mover:
""" Create full-atom relax mover
Inputs:
        * max_iter: int. Maximum number of iterations for FastRelax
        * coord_constraint: float. Weight of the coordinate constraint score term
"""
# Create full-atom score function
sf = pyrosetta.create_score_function('ref2015_cst')
sf.set_weight(
pyrosetta.rosetta.core.scoring.ScoreType.coordinate_constraint,
coord_constraint
)
# Allow movement of backbone, side chains, and chain breaks
mmap = pyrosetta.rosetta.core.kinematics.MoveMap()
mmap.set_bb(True)
mmap.set_chi(True)
mmap.set_jump(True)
# Create FastRelax mover acting in dualspace (cartesian and internal space)
relax_mover = pyrosetta.rosetta.protocols.relax.FastRelax()
relax_mover.set_scorefxn(sf)
relax_mover.max_iter(max_iter)
relax_mover.dualspace(True)
relax_mover.set_movemap(mmap)
relax_mover.ramp_down_constraints(True)
return relax_mover
def quick_refine(in_pdb: str, out_pdb: Optional[str] = None, min_iter: int = 1000):
""" PyRosetta protocol for minimization refinement of protein structure
Inputs:
* in_pdb: str. Path to PDB file to be refined
* out_pdb: str. Path to save refined PDB file
* min_iter: int. Maximum number of iterations for MinMover
"""
if out_pdb is None:
out_pdb = in_pdb
# Load input PDB into pose
pose = pyrosetta.pose_from_pdb(in_pdb)
# Create movers
cst_mover = pyrosetta.rosetta.protocols.relax.AtomCoordinateCstMover()
cst_mover.cst_sidechain(False)
min_mover = get_fa_min_mover(min_iter)
idealize_mover = pyrosetta.rosetta.protocols.idealize.IdealizeMover()
# Refine structure
cst_mover.apply(pose)
min_mover.apply(pose)
idealize_mover.apply(pose)
# Save refined structure to PDB
pose.dump_pdb(out_pdb)
def relax_refine(
in_pdb: str,
out_pdb: Optional[str] = None,
min_iter: int = 1000,
relax_iter: int = 200,
coord_constraint: float = 0.5,
):
""" PyRosetta protocol for relaxation and minimization of protein structure
Inputs:
* in_pdb: str. Path to PDB file to be refined
* out_pdb: str. Path to save refined PDB file
* min_iter: int. Maximum number of iterations for MinMover
        * relax_iter: int. Maximum number of iterations for FastRelax
        * coord_constraint: float. Weight of the coordinate constraint score term
"""
if out_pdb is None:
out_pdb = in_pdb
# Load input PDB into pose
pose = pyrosetta.pose_from_pdb(in_pdb)
# Create movers
cst_mover = pyrosetta.rosetta.protocols.relax.AtomCoordinateCstMover()
cst_mover.cst_sidechain(False)
min_mover = get_fa_min_mover(min_iter)
relax_mover = get_fa_relax_mover(relax_iter, coord_constraint)
idealize_mover = pyrosetta.rosetta.protocols.idealize.IdealizeMover()
# Refine structure
cst_mover.apply(pose)
min_mover.apply(pose)
relax_mover.apply(pose)
min_mover.apply(pose)
idealize_mover.apply(pose)
# Save refined structure to PDB
pose.dump_pdb(out_pdb)
# TODO: ADD AF2 REFINE/RELAX STEP:
# https://github.com/sokrypton/ColabFold/blob/main/beta/AlphaFold2_advanced_beta.ipynb
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="protein file")
parser.add_argument("--output", help="protein file for output", default=None)
parser.add_argument("--relax_iters", help="energy minimization iterations", default=100)
parser.add_argument("--relax_iters", help="relaxation iterations", default=300)
parser.add_argument("--coord_constraint", help="for coord mod. higher = sticter", default=0.5)
parser.add_argument("--mess", help="message to print before attempt")
args = parser.parse_args()
if args.output is None:
args.output = args.input.replace(".pdb", "_refined.pdb")
pyrosetta.init("-mute all")
relax_refine(
args.input,
args.output,
min_iter=args.min_iters,
relax_iter=args.relax_iters,
coord_constraint=args.coord_constraint,
)
print("All done")
| rgn2-replica-main | rgn2_replica/rgn2_refine.py |
import torch
from rgn2_replica.rgn2_utils import *
class ClaspEmbedder(torch.nn.Module):
def __init__(self, config, device):
super().__init__()
from clasp import Transformer as ClaspTransformer, basic_aa_tokenizer
self.tokenizer = basic_aa_tokenizer
self.device = device
# TODO: build encoders based on the config
self.clasp_bioseq_encoder = ClaspTransformer(
num_tokens = 23,
dim = 768,
depth = 12,
seq_len = 512,
sparse_attn = False,
reversible=True
)
self.clasp_bioseq_encoder.load_state_dict(torch.load(config.embedder_checkpoint_path, map_location=device))
self.clasp_bioseq_encoder.eval()
def forward(self, aa_seq):
""" Generates embeddings.
MP-NeRF: https://github.com/EleutherAI/mp_nerf
Inputs:
* aa_seq: list of FASTA strs or
torch.Tensor (B, L) according to MP-NeRF encoding
"""
# format
        if isinstance(aa_seq, torch.Tensor):
            aa_seq = ids_to_embed_input(to_cpu(aa_seq).tolist())
with torch.no_grad():
tokenized_seq = self.tokenizer(aa_seq, context_length=len(aa_seq), return_mask=False)
all_embeddings = self.clasp_bioseq_encoder(to_device(tokenized_seq.unsqueeze(0), self.device), return_all_embeddings=True)
# drop CLS embedding, return per-token embeddings only
return all_embeddings[:, 1:]
class EsmEmbedder(torch.nn.Module):
def __init__(self, device):
super().__init__()
import esm
self.embedder, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
self.batch_converter = alphabet.get_batch_converter()
self.device = device
def forward(self, aa_seqs):
""" Generates embeddings.
MP-NeRF: https://github.com/EleutherAI/mp_nerf
Inputs:
        * aa_seqs: list of FASTA strs or
torch.Tensor (B, L) according to MP-NeRF encoding
"""
# format
        if isinstance(aa_seqs, torch.Tensor):
            aa_seqs = ids_to_embed_input(to_cpu(aa_seqs).tolist())
# use ESM transformer
REPR_LAYER_NUM = 33
max_seq_len = max([len(aa_seq) for aa_seq in aa_seqs])
        batch_labels, batch_strs, batch_tokens = self.batch_converter(aa_seqs)
with torch.no_grad():
results = self.embedder(
to_device(batch_tokens, self.device),
repr_layers=[REPR_LAYER_NUM],
return_contacts=False
)
# index 0 is for start token. so take from 1 one
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:max_seq_len+1, :]
return token_reps.detach()
def get_embedder(config, device):
"""Returns embedding model based on config.embedder_model
Usage:
config.embedder_model = 'clasp'
OR
config.embedder_model = 'esm1b'
embedder = embedders.get_embedder(config, device)
embeddings = embedder(aa_seq)
"""
if config.embedder_model == 'clasp':
print('Loading CLASP embedding model')
config.emb_dim = 768
config.embedder_checkpoint_path = '../clasp/data/run48_2021-07-18_13_31_19_step00005000.bioseq_encoder.pt'
emb_model = ClaspEmbedder(config, device)
else:
print('Loading ESM-1b embedding model')
config.emb_dim = 1280
emb_model = EsmEmbedder(device)
    return emb_model.to(device)
| rgn2-replica-main | rgn2_replica/embedders.py |
# Author: Gurvinder Singh (@gurvindersingh)
import pytorch_lightning as pl
from datasets import load_from_disk
from .tokenizer import Tokenizer
from transformers import DataCollatorForLanguageModeling
import torch
from torch.utils.data.dataloader import DataLoader
from sklearn.preprocessing import MultiLabelBinarizer
class ProteinLMDataset():
def __init__(self, ds, pfam_size, go_size, max_len=1024, columns=['input_ids','pfam_labels','go_labels']):
self.max_len = max_len
self.ds = ds
self.ds.set_format(columns=columns)
self.pmlb = MultiLabelBinarizer().fit([list(range(pfam_size))])
self.gmlb = MultiLabelBinarizer().fit([list(range(go_size))])
def __len__(self):
return len(self.ds)
def __getitem__(self, idx):
data = {}
data['input_ids'] = torch.LongTensor(self.ds[idx]['input_ids'][:self.max_len])
data['pfam_labels'] = torch.FloatTensor(self.pmlb.transform([self.ds[idx]['pfam_labels']])[0])
data['go_labels'] = torch.FloatTensor(self.gmlb.transform([self.ds[idx]['go_labels']])[0])
return data
class ProteinLMDataModule(pl.LightningDataModule):
def __init__(
self,
ds_path,
pfam_size,
go_size,
dtype="torch",
bsize=16,
num_procs=4,
columns=["input_ids", "pfam_labels", "go_labels"],
mlm_probability=0.15,
pad_to_multiple_of=8,
max_len=1024,
):
super().__init__()
(
self.pfam_size,
self.go_size,
self.dtype,
self.bsize,
self.num_procs,
self.max_len,
) = (pfam_size, go_size, dtype, bsize, num_procs, max_len)
self.ds = load_from_disk(ds_path)
self.columns = columns
tok = Tokenizer()
self.data_collator = DataCollatorForLanguageModeling(
tokenizer=tok,
mlm_probability=mlm_probability,
pad_to_multiple_of=pad_to_multiple_of,
)
def train_dataloader(self):
return DataLoader(
ProteinLMDataset(self.ds['train'],
self.pfam_size,
self.go_size,
max_len=self.max_len,
columns=self.columns),
batch_size=self.bsize,
collate_fn=self.data_collator,
num_workers=self.num_procs,
)
def val_dataloader(self):
return DataLoader(
ProteinLMDataset(self.ds['valid'],
self.pfam_size,
self.go_size,
max_len=self.max_len,
columns=self.columns),
batch_size=self.bsize,
collate_fn=self.data_collator,
num_workers=self.num_procs,
)
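# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wiring this DataModule to
# the LightningModule defined in lmmodel.py. The dataset path, label-space sizes
# and learning rate are illustrative assumptions; the path must point to a
# dataset tokenized with Tokenizer.tokenize and saved via `save_to_disk`.
if __name__ == "__main__":
    from rgn2_replica.lmmodel import ProteinLMModel
    dm = ProteinLMDataModule("path/to/tokenized_ds", pfam_size=100, go_size=50, bsize=4)
    model = ProteinLMModel(vocab_size=30, pfam_size=100, go_size=50, lr=1e-4)
    pl.Trainer(max_epochs=1).fit(model, dm)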
| rgn2-replica-main | rgn2_replica/dataset.py |
# Author: Gurvinder Singh (@gurvindersingh)
from transformers import PreTrainedTokenizer
import numpy as np
class Tokenizer(PreTrainedTokenizer):
# Taken from mp_nerf and extended based on ESM
ALPHABETS = "ACDEFGHIKLMNPQRSTVWY_XBUZO"
SPLTOKENS = ['<cls>','<eos>','<pad>','<mask>']
def __init__(self, extra_tokens=[]):
ext_tokens = self.SPLTOKENS + extra_tokens
base_index = {aa:i for i,aa in enumerate(self.ALPHABETS)}
        ext_index = {k: i for k, i in zip(ext_tokens, range(len(self.ALPHABETS), len(self.ALPHABETS) + len(ext_tokens)))}
self.index = {**base_index, **ext_index}
self.cls_token = '<cls>'
self.mask_token = '<mask>'
self._pad_token = '<pad>'
self.padding_side = 'right'
self.name_or_path = 'ProteinSeqTokenizer'
def __len__(self):
return len(self.index)
@property
def vocab_size(self):
return len(self.index)
def tokenize(self, r):
"""
r: Dataset row with 'sequence' as one of the key
Returns: Tokenized sequence
To tokenize the dataset run the code as
>>> from datasets import load_from_disk
>>> ds = load_from_disk('path_to_dataset')
>>> tok = Tokenizer()
>>> ds = ds.map(tok.tokenize, num_proc=4)
>>> ds.save_to_disk('path_to_save_tokenized_ds')
"""
r['input_ids'] = [self.convert_tokens_to_ids(self.cls_token)]+self.convert_tokens_to_ids(list(r['sequence']))
return r
def get_special_tokens_mask(self, token_ids, already_has_special_tokens=False):
mask = np.zeros(len(token_ids), dtype=int)
mask[0] = 1 # <cls>
mask[np.array(token_ids) == self.index[self._pad_token]] = 1
return mask
def convert_tokens_to_ids(self, tokens):
if isinstance(tokens, str):
return self.index[tokens]
ids = []
for token in tokens:
ids.append(self.index[token])
return ids
def __repr__(self):
return f"PreTrainedTokenizer(name_or_path='{self.name_or_path}',vocab_size={self.vocab_size},padding_side='{self.padding_side}'"
| rgn2-replica-main | rgn2_replica/tokenizer.py |
# Author: Eric Alcaide ( @hypnopump )
import random
import math
import torch
import numpy as np
# random hacks - device utils for pyTorch - saves transfers
to_cpu = lambda x: x.cpu() if x.is_cuda else x
to_device = lambda x, device: x.to(device) if x.device != device else x
# system-wide utility functions
def expand_dims_to(t, length):
""" Expands up to N dimensions. Different from AF2 (inspo drawn):
* Only works for torch Tensors
* Expands to `t`, NOT `adds t dims`
https://github.com/lucidrains/alphafold2/blob/main/alphafold2_pytorch/utils.py#L63
Ex:
>>> expand_dims_to( torch.eye(8), length = 3) # (1, 8, 8)
>>> expand_dims_to( torch.eye(8), length = 1) # (8, 8)
"""
if not length - len(t.shape) > 0:
return t
    return t.reshape(*((1,) * (length - len(t.shape))), *t.shape)
def set_seed(seed, verbose=False):
    try: random.seed(seed)
    except: print("Could not set `random` module seed")
    try: np.random.seed(seed)
    except: print("Could not set `np.random` module seed")
try:
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
except:"Could not set `torch.manual_seed` module seed"
if verbose:
print("Seet seed to {0}".format(seed))
| rgn2-replica-main | rgn2_replica/utils.py |
import re
import torch
from sidechainnet.utils.sequence import ProteinVocabulary as VOCAB
# random hacks - device utils for pyTorch - saves transfers
to_cpu = lambda x: x.cpu() if x.is_cuda else x
to_device = lambda x, device: x.to(device) if x.device != device else x
VOCAB = VOCAB()
# data loading funcs copied from:
# https://github.com/hypnopump/alphafold2/blob/main/alphafold2_pytorch/utils.py#L330
def ids_to_embed_input(x):
""" Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
Inputs:
* x: any deeply nested list of integers that correspond with amino acid id
"""
assert isinstance(x, list), 'input must be a list'
id2aa = VOCAB._int2char
out = []
for el in x:
if isinstance(el, list):
out.append(ids_to_embed_input(el))
elif isinstance(el, int):
out.append(id2aa[el])
else:
raise TypeError('type must be either list or character')
if all(map(lambda c: isinstance(c, str), out)):
return (None, ''.join(out).replace("_", ""))
return out
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
""" Returns the ESM embeddings for a protein.
Inputs:
* seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
* embedd_model: ESM model (see train_end2end.py for an example)
* batch_converter: ESM batch converter (see train_end2end.py for an example)
Outputs: tensor of (b, L, embedd_dim)
* embedd_dim: number of embedding dimensions. 1280 for ESM-1b
"""
# use ESM transformer
device = next(embedd_model.parameters()).device
REPR_LAYER_NUM = 33
max_seq_len = seq.shape[-1]
embedd_inputs = ids_to_embed_input( to_cpu(seq).tolist() )
batch_labels, batch_strs, batch_tokens = batch_converter(embedd_inputs)
with torch.no_grad():
results = embedd_model( to_device(batch_tokens, device), repr_layers=[REPR_LAYER_NUM], return_contacts=False )
# index 0 is for start token. so take from 1 one
token_reps = results["representations"][REPR_LAYER_NUM][..., 1:max_seq_len+1, :]
return token_reps.detach()
def seqs_from_fasta(fasta_file, names=False):
    """ Reads protein sequences (and optionally their names) from FASTA files. """
    seqs, names_list = [], []
    with open(fasta_file, "r") as f:
        lines = f.readlines()
        for i, line in enumerate(lines):
            if line[0] not in {">", ";"}:
                names_list.append( lines[i-1][1:].replace(" ", "_").replace("\n", "") )
                seqs.append( line.replace("\n", "") )
    seqs = [re.sub(r'[^a-zA-Z]','', seq).upper() for seq in seqs]
    return (seqs, names_list) if names else seqs
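# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): round-tripping a tiny
# FASTA file through seqs_from_fasta and turning integer ids back into embedder
# input. The file name, sequences and ids below are illustrative.
if __name__ == "__main__":
    with open("toy.fasta", "w") as f:
        f.write(">prot_a\nMKTAYIAK\n>prot_b\nGSHMLEDP\n")
    seqs, fasta_names = seqs_from_fasta("toy.fasta", names=True)
    print(fasta_names, seqs)                  # ['prot_a', 'prot_b'] ['MKTAYIAK', 'GSHMLEDP']
    print(ids_to_embed_input([[5, 3, 9]]))    # [(None, '...')] - ids follow sidechainnet's vocab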
| rgn2-replica-main | rgn2_replica/rgn2_utils.py |
import torch
from torch import nn
from typing import Optional
import torch.nn.functional as F
class AminoBERTLoss(nn.Module):
def __init__(self, padding_token=-100, vocab_size=24):
super().__init__()
self.masked_loss = nn.CrossEntropyLoss(
ignore_index=padding_token
)
self.chunk_perm_loss = nn.CrossEntropyLoss()
self.vocab_size = vocab_size
def forward(
self, logit_out=None, logit_chunk_perm=None, target=None, chunk_perm=None,
):
"""
logit_out: (bs, len, vocab_size) tensor
logit_chunk_perm: (bs, 2) tensor
target: (bs, len) tensor
chunk_perm: (bs, 1)
"""
# to do: Check Logic
        global_perturb = 1 - chunk_perm
masked_lm_loss = self.masked_loss(logit_out.view(-1, self.vocab_size), target.view(-1))
chunk_perm_loss = self.chunk_perm_loss(logit_chunk_perm, chunk_perm)
        loss = (chunk_perm * chunk_perm_loss) + (global_perturb * masked_lm_loss)
return loss
class GoPFAMLoss(nn.Module):
def __init__(self, weights=[1., 1.]):
"""
weights: [float, float] weights for combining loss for go and pfam
"""
super().__init__()
self.weights = weights
self.go_loss = nn.BCEWithLogitsLoss(reduction="none")
self.pfam_loss = nn.BCEWithLogitsLoss(reduction="none")
def forward(self, logit_go=None, logit_pfam=None, target_go=None, target_pfam=None):
"""
logit_go: (bs, go_n_classes)
logit_pfam: (bs, pfam_n_classes)
target_go: (bs, go_n_classes)
target_pfam: (bs, pfam_n_classes)
"""
# When label belongs to class 0 means no label, we ignore the loss
go_weights = torch.argmax(target_go, dim=-1).clamp(0,1)
pfam_weights = torch.argmax(target_pfam, dim=-1).clamp(0,1)
go_loss = (self.go_loss(logit_go, target_go).mean(dim=-1) * go_weights).sum()
pfam_loss = (self.pfam_loss(logit_pfam, target_pfam).mean(dim=-1) * pfam_weights).sum()
if go_weights.sum() > 0:
go_loss = go_loss/go_weights.sum()
if pfam_weights.sum() > 0:
pfam_loss = pfam_loss/pfam_weights.sum()
combined_loss = go_loss * self.weights[0] + pfam_loss * self.weights[1]
return combined_loss
class LossWrapper(nn.Module):
    def __init__(self, lm_loss: nn.Module, aux_loss: nn.Module, weights=[1., 1.]):
"""
Combines AminoBERTLoss with GoPFAMLoss
"""
super().__init__()
self.lm_loss = lm_loss
self.aux_loss = aux_loss
self.weights = weights
def forward(
self,
logit_out=None,
logit_chunk_perm=None,
logit_go=None,
logit_pfam=None,
target_go=None,
target_pfam=None,
chunk_perm=None,
target=None,
):
lm_loss = self.lm_loss(
logit_out=logit_out,
logit_chunk_perm=logit_chunk_perm,
target=target,
chunk_perm=chunk_perm,
)
aux_loss = self.aux_loss(
logit_go=logit_go,
logit_pfam=logit_pfam,
target_go=target_go,
target_pfam=target_pfam,
)
combined_loss = lm_loss * self.weights[0] + aux_loss * self.weights[1]
        return combined_loss
| rgn2-replica-main | rgn2_replica/losses.py |
# Author: Eric Alcaide ( @hypnopump )
import random
import math
import torch
import numpy as np
# utils specific for the LM
def chunk_permute(seq):
""" Permutes a chunk from the sequence.
Inputs:
* seq: (N,) tensor.
Outputs:
* seq: (N,) tensor
* labels: (N,) long tensor (idx of each chunk)
"""
x = random.randint(2, 10)
step = math.ceil( seq.shape[0] / x )
seq_ = seq.clone()
perms, labels = [], []
for i in range(x):
chunk = seq_[i*step:(i+1)*step].numpy()
np.random.shuffle(chunk)
perms.append( torch.from_numpy(chunk) )
labels.append( torch.ones_like(perms[-1]) * i )
perms = torch.cat( perms , dim=0).to(seq.device)
labels = torch.cat( labels , dim=0).to(seq.device)
return perms, labels
def masked_lang(seq, mask_tok=99, prop_len=0.15, lam=2.5):
""" Masks contiguous positions for masked language modelling
Inputs:
* seq: (N,) tensor
* mask_tok: int. mask token.
* prop_len: float. proportion of the length to mask
* lam: float. lambda for the poisson distribution.
Outputs: (N,)
"""
seq_ = seq.clone()
# set mask features
clump_size = int( np.random.poisson(lam=lam) + 1 )
n_mask = int( seq.shape[0] * prop_len )
# get maskable idxs
idxs = list(range(seq.shape[0] - clump_size))
# do contiguous mask iteratively
for i in range( int(n_mask/clump_size) ):
choice = np.random.choice(idxs)
seq_[choice:choice+clump_size] = mask_tok
# eliminate contiguous positions
for j in range(clump_size):
idxs.remove(choice+j)
return seq_
def mask_seq(seq, mask_tok=99, prop_len=0.15, lam=2.5):
""" Masks a sequence as described in paper - page 16
https://www.biorxiv.org/content/10.1101/2021.08.02.454840v1.full.pdf
Inputs:
* seq: (N,) tensor
Outputs:
* seq: (N,) tensor
        * chunk_perm: bool (indicates seq has been chunk-permuted)
* labels: (N,) tensor. Chunk belonging of each AA
"""
p = random.random()
labels = None
# chunk permutation
if p < 0.3:
# modify (prob=0.35) or unchanged
if p < 0.3*0.35: seq, labels = chunk_permute(seq)
else: pass
# masked language modelling
else:
# normal mask or clumping depending on prob - regulate lambda
lam_eff = 0 if p < ( 0.3 + 0.7 * (1 - 0.3) ) else lam
seq = masked_lang(seq, mask_tok=mask_tok,
prop_len=prop_len, lam=lam_eff)
# create chunk labels
if labels is None:
labels = torch.zeros_like(seq)
return seq, p < 0.3*0.35, labels
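# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the two perturbations that
# mask_seq combines, applied directly to a toy integer-encoded sequence. lam=0
# makes masked_lang place single-token masks; the token values are illustrative.
if __name__ == "__main__":
    seq = torch.arange(60) % 20
    masked = masked_lang(seq, mask_tok=99, prop_len=0.15, lam=0.)
    permuted, chunk_labels = chunk_permute(seq)
    print((masked == 99).sum().item())   # 9 masked positions (15% of 60)
    print(chunk_labels.unique())         # one label id per permuted chunk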
| rgn2-replica-main | rgn2_replica/lm_utils.py |
# Author: Eric Alcaide ( @hypnopump )
import os
import sys
from typing import Optional, Tuple, List
# science
import numpy as np
# ML
import torch
import torch.nn.functional as F
from x_transformers import XTransformer, Encoder
from einops import rearrange, repeat
# custom
import mp_nerf
from rgn2_replica.utils import *
#######################
#### USEFUL PIECES ####
#######################
@torch.jit.script
def prediction_wrapper(x: torch.Tensor, pred: torch.Tensor):
""" Facilitates recycling. Inputs the original input + prediction
Returns a new original input.
This case is specific for this task, but could be adapted.
Inputs:
* x: (B, L, Emb_dim) float tensor. Emb dim incorporates already pred_dim
* pred: (B, L, pred_dim)
Outputs: (B, L, Emb_dim)
"""
# ensure preds' first values
pred[:, 0, [0, 2]] = 0.
pred[:, 0, [1, 3]] = 1.
pred[:, 1, 2] = 0.
pred[:, 1, 3] = 1.
# refill x with preds
x_ = x.clone()
x_[:, :-1, -pred.shape[-1]:] = pred.detach()
return x_
def pred_post_process(points_preds: torch.Tensor,
seq_list: Optional[List] = None,
mask: Optional[torch.Tensor] = None):
""" Converts an angle-based output to structures.
Inputs:
* points_preds: (B, L, 2, 2)
* seq_list: (B,) list of str. FASTA sequences. Optional. build scns
* mask: (B, L) bool tensor.
        Outputs:
        * points_preds: (B, L, 2, 2)
        * ca_trace_pred: (B, L, 14, 3)
* frames_preds: (B, L, 3, 3)
* wrapper_pred: (B, L, 14, 3)
"""
device = points_preds.device
if mask is None:
mask = torch.ones(points_preds.shape[:-2], dtype=torch.bool)
# restate first values to known ones (1st angle, 1s + 2nd dihedral)
points_preds[:, 0, [0, 1], 1] = 1.
points_preds[:, 0, [0, 1], 0] = 0.
points_preds[:, 1, 1, 1] = 1.
points_preds[:, 1, 1, 0] = 0.
# rebuild ca trace with angles - norm vectors to ensure mod=1. - (B, L, 14, 3)
ca_trace_pred = torch.zeros(*points_preds.shape[:-2], 14, 3, device=device)
ca_trace_pred[:, :, 1], frames_preds = mp_nerf.proteins.ca_from_angles(
(points_preds / (points_preds.norm(dim=-1, keepdim=True) + 1e-7)).reshape(
points_preds.shape[0], -1, 4
)
)
ca_trace_pred = mp_nerf.utils.ensure_chirality(ca_trace_pred)
# calc BB - can't do batched bc relies on extremes.
wrapper_pred = torch.zeros_like(ca_trace_pred)
for i in range(points_preds.shape[0]):
wrapper_pred[i, mask[i]] = mp_nerf.proteins.ca_bb_fold(
ca_trace_pred[i:i+1, mask[i], 1]
)
if seq_list is not None:
# build sidechains
scaffolds = mp_nerf.proteins.build_scaffolds_from_scn_angles(seq=seq_list[i], device=device)
wrapper_pred[i, mask[i]], _ = mp_nerf.proteins.sidechain_fold(
wrapper_pred[i, mask[i]], **scaffolds, c_beta="backbone"
)
return points_preds, ca_trace_pred, frames_preds, wrapper_pred
# adapt LSTM to batch api
class LSTM(torch.nn.modules.rnn.RNNBase):
def __init__(self, input_size, hidden_size, bias=True, num_layers=1,
batch_first=True, dropout=0, bidirectional=False):
""" Custom LSTM layer which supports batching by mask
* input_size: read pytorch docs - LSTM
* hidden_size: read pytorch docs - LSTM
* bias: read pytorch docs - LSTM
* num_layers: int. number of layers. only supports 1 for now.
* batch_first: bool. input should be (B, L, D) if True,
(L, B, D) if False
* dropout: float. amount of dropout to add to inputs.
Not supported
* bidirectional: bool. whether layer is bidirectional. Not supported.
"""
super().__init__(
mode='LSTM', input_size=input_size, hidden_size=hidden_size,
num_layers=num_layers, bias=bias, batch_first=batch_first,
dropout=dropout, bidirectional=bidirectional)
self.num_layers = num_layers
self.batch_first = batch_first
self.lstm_cell = torch.nn.modules.rnn.LSTMCell(input_size, hidden_size, bias)
self.reset_parameters()
def reset_parameters(self):
stdv = 1.0 / np.sqrt(self.hidden_size)
for weight in self.parameters():
weight.data.uniform_(-stdv, stdv)
def forward(self, input_: torch.Tensor,
hx_ : Optional[torch.Tensor]=None,
cx_ : Optional[torch.Tensor]=None,
mask: Optional[torch.Tensor]=None
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
""" Accepts inputs of variable length and resolves by masking.
~Assumes sequences are ordered by descending length.~
"""
device = input_.device
n_batch, n_seq, n_feat = input_.size()
# same as in https://pytorch.org/docs/stable/_modules/torch/nn/modules/rnn.html#LSTM
if hx_ is None:
hx_ = torch.zeros(input_.size(0), self.hidden_size, dtype=input_.dtype,
device=device)
if cx_ is None:
cx_ = torch.zeros(input_.size(0), self.hidden_size, dtype=input_.dtype,
device=device)
if mask is None:
mask = torch.ones(input_.shape[:-1], dtype=torch.bool, device=device)
steps = []
unbind_dim = 1 if self.batch_first else 0
for seq, mask_ in zip(input_.unbind(unbind_dim), mask.unbind(unbind_dim)):
for k in range(self.num_layers):
                # select appropriate entries by masking
                masked_input = seq[mask_]  # (B, D) -> (n_active, D)
masked_hx, masked_cx = hx_[mask_], cx_[mask_]
# pass
hx_[mask_], cx_[mask_] = self.lstm_cell(masked_input, (masked_hx, masked_cx))
# record hiddens
steps.append(hx_.clone())
outs = torch.stack(steps, dim=1)
if not self.batch_first:
outs = outs.transpose(0,1)
return outs, (hx_, cx_)
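# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the mask-aware LSTM above
# on a toy batch whose second sequence is shorter, plus prediction_wrapper
# writing previous angle predictions back into the input buffer for recycling.
# All shapes below are illustrative.
if __name__ == "__main__":
    lstm = LSTM(input_size=8, hidden_size=16)
    x = torch.randn(2, 5, 8)
    mask = torch.tensor([[True] * 5, [True] * 3 + [False] * 2])
    out, (h_n, c_n) = lstm(x, mask=mask)
    print(out.shape, h_n.shape)              # torch.Size([2, 5, 16]) torch.Size([2, 16])

    emb = torch.zeros(1, 6, 10)              # toy embedding whose last 4 dims hold angles
    pred = torch.randn(1, 5, 4)              # predictions for all but the last position
    print(prediction_wrapper(emb, pred)[0, 0, -4:])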
##############
### MODELS ###
##############
class RGN2_Transformer(torch.nn.Module):
def __init__(self, embedding_dim=1280, hidden=[512], mlp_hidden=[128, 4],
act="silu", x_transformer_config={
"depth": 8,
"heads": 4,
"attn_dim_head": 64,
# "attn_num_mem_kv": 16, # 16 memory key / values
"use_scalenorm": True, # set to true to use for all layers
"ff_glu": True, # set to true to use for all feedforwards
"attn_collab_heads": True,
"attn_collab_compression": .3,
"cross_attend": False,
"gate_values": True, # gate aggregated values with the input"
# "sandwich_coef": 6, # interleave attention and feedforwards with sandwich coefficient of 6
"rotary_pos_emb": True # turns on rotary positional embeddings"
}
):
""" Transformer drop-in for RGN2-LSTM.
Inputs:
* layers: int. number of rnn layers
* mlp_hidden: list of ints.
"""
super(RGN2_Transformer, self).__init__()
act_types = {
"relu": torch.nn.ReLU,
"silu": torch.nn.SiLU,
}
# store params
self.embedding_dim = embedding_dim
self.hidden = hidden
self.mlp_hidden = mlp_hidden
# declare layers
""" Declares an XTransformer model.
* No decoder, just predict embeddings
* project with a lst_mlp
"""
self.to_latent = torch.nn.Linear(self.embedding_dim, self.hidden[0])
self.transformer = Encoder(
dim= self.hidden[-1],
**x_transformer_config
)
self.last_mlp = torch.nn.Sequential(
torch.nn.Linear(self.hidden[-1], self.mlp_hidden[0]),
act_types[act](),
torch.nn.Linear(self.mlp_hidden[0], self.mlp_hidden[-1])
)
def forward(self, x, mask : Optional[torch.Tensor] = None,
recycle:int = 1, inter_recycle:bool = False):
""" Inputs:
* x (B, L, Emb_dim)
Outputs: (B, L, 4).
Note: 4 last dims of input is angles of previous point.
            for first point, add dummy token [-5, -5, -5, -5]
"""
# same input for both rgn2-stm and transformer, so mask angles
r_iters = []
x_buffer = x.clone() if recycle > 1 else x # buffer for recycling
x[..., -4:] = 0.
for i in range(max(1, recycle)):
x_pred = self.to_latent(x)
x_pred = self.transformer(x_pred, mask=mask)
x_pred = self.last_mlp(x_pred)
# cat predictions to tokens for recycling
if i < recycle:
                # normalize angles to avoid instability
angles = x_pred.detach()[:, :-1].reshape(x.shape[0], -1, 2, 2)
angles = F.normalize(angles, dim=-1).reshape(x.shape[0], -1, 4)
# regen inputs
x = prediction_wrapper(x_buffer, angles)
# store and return intermediate steps - only if not last
if inter_recycle:
r_iters.append(x_pred.detach())
r_iters = torch.stack(r_iters, dim=-3) if inter_recycle else \
torch.empty(x.shape[0], recycle-1, device=x.device) # (B, recycle-1, L, 4)
return x_pred, r_iters
def predict_fold(self, x, mask : Optional[torch.Tensor] = None,
recycle:int = 1, inter_recycle:bool = False):
""" Predicts all angles at once so no need for AR prediction.
Same inputs / outputs than
"""
with torch.no_grad():
return self.forward(
x=x, mask=mask,
recycle=recycle, inter_recycle=inter_recycle
)
class RGN2_Naive(torch.nn.Module):
def __init__(self, layers=3, emb_dim=1280, hidden=256,
bidirectional=False, mlp_hidden=[32, 4], layer_type="LSTM",
act="silu", input_dropout=0.0, angularize=False):
""" RGN2 module which turns embeddings into a Cα trace.
Inputs:
* layers: int. number of rnn layers
* emb_dim: int. number of dimensions in the input
* hidden: int or list of ints. hidden dim at each layer
* bidirectional: bool. whether to use bidirectional LSTM
* mlp_hidden: list of ints. dims for final MLP dimensions
* layer_type: str. options present in `self.layer_types`
* act: str. options present in `self.act_types`
* input_dropout: float. dropout applied before all recurrent
layers independently
* angularize: bool. whether to do single-value regression (False)
or predict a set of alphabet torsions (True).
"""
super(RGN2_Naive, self).__init__()
hidden_eff = lambda x: x + x*int(bidirectional)
layer_types = {
"LSTM": LSTM, # torch.nn.LSTM,
"GRU": torch.nn.GRU,
}
act_types = {
"relu": torch.nn.ReLU,
"silu": torch.nn.SiLU,
}
# store params
self.layer_type = layer_type
self.num_layers = layers
self.hidden = [emb_dim]+hidden if isinstance(hidden, list) else \
[emb_dim] + [hidden]*layers
self.bidirectional = bidirectional
self.mlp_hidden = mlp_hidden
self.angularize = angularize
# declare layers
self.dropout = input_dropout # could use `Dropout2d`
self.dropout_l = torch.nn.Dropout(p=self.dropout) if input_dropout else \
torch.nn.Identity()
self.stacked_lstm_f = torch.nn.ModuleList([
layer_types[self.layer_type](
# double size of input (cat of lstm_f, lstm_b) if not first layer
input_size = hidden_eff(self.hidden[i]) if i!= 0 else self.hidden[i],
hidden_size = self.hidden[1],
batch_first = True,
bidirectional = False,
num_layers = 1,
) for i in range(layers)
])
# add backward lstm
if self.bidirectional:
self.stacked_lstm_b = torch.nn.ModuleList([
layer_types[self.layer_type](
# double size of input (cat of lstm_f, lstm_b) if not first layer
input_size = hidden_eff(self.hidden[i]) if i!= 0 else self.hidden[i],
hidden_size = self.hidden[1],
batch_first = True,
bidirectional = False,
num_layers = 1,
) for i in range(layers)
])
# jit-COMPILE if custom LSTM
if isinstance(self.stacked_lstm_f, LSTM):
self.stacked_lstm_f = torch.nn.ModuleList([
torch.jit.script(self.stacked_lstm_f[i]) for i in range(self.num_layers)
])
self.stacked_lstm_b = torch.nn.ModuleList([
                torch.jit.script(self.stacked_lstm_b[i]) for i in range(self.num_layers)
])
self.last_mlp = torch.nn.Sequential(
torch.nn.Linear(hidden_eff(self.hidden[-1]), self.mlp_hidden[0]),
act_types[act](),
torch.nn.Linear(self.mlp_hidden[0], self.mlp_hidden[-1])
)
# declare infra needed for angularization level
if self.angularize:
self.angs = torch.nn.Parameter( # init custom param to -pi, pi
(2*torch.rand(1, 1, 2, self.mlp_hidden[-1]//2) - 1) * np.pi # final pred / 2
)
self.register_parameter("angles", self.angs)
def forward(self, x:torch.Tensor, mask: Optional[torch.Tensor] = None,
recycle:int = 1, inter_recycle:bool = False):
""" Inputs:
* x: (B, L, Emb_dim)
* mask: ((B), L) bool. whether to predict point.
* recycle: int. recycling iterations
* inter_recycle: bool. whether to provide intermediate
recycling iterations.
* input_dropout: float. dropout quantity at input.
Outputs:
* x_pred: (B, L, 4).
* r_iters: list (recycle-1, B, L, 4)
            Note: the last 4 dims of the input are the angles of the previous point;
            for the first point, add the dummy token [-5, -5, -5, -5]
"""
r_iters = []
x_buffer = x.clone() if recycle > 1 else x # buffer for iters
if mask is None:
seq_lens = torch.tensor([x.shape[1]]*x.shape[0], dtype=torch.long)
else:
seq_lens = mask.sum(dim=-1).long()
for i in range( max(1, recycle) ):
# do N layers, cat directions between them
x_pred = x.clone() if self.num_layers > 1 else x # buffer for layers
for k in range(self.num_layers):
x_f, (h_n, c_n) = self.stacked_lstm_f[k](
self.dropout_l(x_pred) , mask=mask
)
if self.bidirectional:
# reverse - only the sequence part
x_b = x_pred.clone()
for l, length in enumerate(seq_lens):
x_b[l, :length] = torch.flip(x_b[l, :length], dims=(-2,))
# back pass
x_b, (h_n_b, c_n_b) = self.stacked_lstm_b[k](
self.dropout_l(x_b), mask=mask
)
# reverse again to match forward direction
for l, length in enumerate(seq_lens):
x_b[l, :length] = torch.flip(x_b[l, :length], dims=(-2,))
# merge w/ forward direction
x_pred = torch.cat([x_f, x_b], dim=-1)
else:
x_pred = x_f
x_pred = self.last_mlp(x_pred)
if self.angularize:
x_pred = self.turn_to_angles(x_pred)
# cat predictions to tokens for recycling
            if i < recycle - 1:  # skip on the final pass, matching the (recycle-1) shape of r_iters
# normalize angles to avoid unstability
angles = x_pred.detach()[:, :-1].reshape(x.shape[0], -1, 2, 2)
angles = F.normalize(angles, dim=-1).reshape(x.shape[0], -1, 4)
# regen inputs
x = prediction_wrapper(x_buffer, angles)
# store and return intermediate steps - only if not last
if inter_recycle:
r_iters.append(x_pred.detach())
r_iters = torch.stack(r_iters, dim=-3) if inter_recycle else \
torch.empty(x.shape[0], recycle-1, device=x.device) # (B, recycle-1, L, 4)
return x_pred, r_iters
def predict_fold(self, x, mask : Optional[torch.Tensor] = None,
recycle : int = 1, inter_recycle : bool = False):
""" Autoregressively generates the protein fold
Inputs:
* x: ((B), L, Emb_dim)
* mask: ((B), L) bool. whether to predict sequence.
* recycle: int. recycling iterations
* inter_recycle: bool. whether to provide intermediate
recycling iterations.
Outputs:
* x_pred: ((B), L, 4)
* r_iters: list (recycle-1, B, L, 4)
            Note: the last 4 dims of the input hold the dummy token for the first residue.
            Use the same convention as in the `.forward()` method.
"""
# default mask is everything
if mask is None:
mask = torch.ones(x.shape[:-1], dtype=torch.bool, device=x.device)
# handles batch shape
squeeze = len(x.shape) == 2
if squeeze:
x = x.unsqueeze(dim=0)
mask = mask.unsqueeze(dim=0)
# no gradients needed for prediction
with torch.no_grad():
r_policy = 1
for i in range(x.shape[-2]):
# only recycle (if set to) in last iter - saves time
if i < ( x.shape[-2] - 1 ):
r_policy = recycle
input_step = x[mask[:, i], :i+1] # (B, 0:i+1, 4)
preds, r_iters = self.forward( # (B, 1:i+2, 4)
input_step, recycle = r_policy, inter_recycle=inter_recycle
)
# only modify if it's not last iter. last angle is not needed
if i < ( x.shape[-2] - 1 ):
x[mask[:, i], 1:i+2, -4:] = preds
# re-handles batch shape
return preds.squeeze() if squeeze else preds, r_iters
def turn_to_angles(self, preds, angles=2):
""" Turns a softmax prediction (B, L, N*angles) -> (B, L, 2*angles). """
probs = F.softmax( rearrange(preds, "b l (a n) -> b l a n", a=angles), dim=-1 ) # (B, L, angles, N)
angles = mp_nerf.utils.circular_mean(angles=self.angles, weights=probs) # (B, L, angles)
angles = mp_nerf.ml_utils.angle_to_point_in_circum(angles) # (B, L, angles, 2)
return rearrange(angles, "b l a n -> b l (a n)") # (B, L, 2 * angles)
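# A minimal usage sketch (not part of the original file). It assumes ESM-1b style embeddings
# of dim 1280, with the 4 trailing channels reserved for the previous residue's angles as
# described in the forward() docstring above:
# model = RGN2_Naive(layers=2, emb_dim=1280+4, hidden=1024, bidirectional=True,
#                    mlp_hidden=[128, 4], layer_type="LSTM", act="silu")
# x = torch.randn(1, 64, 1280+4)   # (B, L, Emb_dim)
# x[:, 0, -4:] = -5.               # dummy token for the first residue
# angles, _ = model(x, recycle=1)  # (B, L, 4): sin/cos pairs for two torsions per residue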
class Refiner(torch.nn.Module):
""" Refines a protein structure by invoking several Rosetta scripts. """
    def __init__(self, **kwargs):
        super(Refiner, self).__init__()
| rgn2-replica-main | rgn2_replica/rgn2.py |
# Author: Gurvinder Singh (@gurvindersingh)
import argparse
import os
import pytorch_lightning as pl
from rgn2_replica.dataset import ProteinLMDataModule
from rgn2_replica.tokenizer import Tokenizer
from rgn2_replica.lmmodel import ProteinLMModel
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import ModelCheckpoint
def parse_arguments():
parser = argparse.ArgumentParser(description='Train protein language model')
parser.add_argument("--ds", default="data/ur90_small", help="Dataset path")
parser.add_argument("--accelerator", default="dp", help="Type of accelerator to train model. dp, ddp, ddp_spawn etc.")
parser.add_argument("--output_dir", default="models_ckpt", help="Output directory to store model checkpoints")
parser.add_argument("--gpus", default=0, help="Number of gpus to train on", type=int)
parser.add_argument("--precision", default=32, help="Percision for training 16 or 32", type=int)
parser.add_argument("--max_len", default=512, help="Maximum length per sequence", type=int)
parser.add_argument("--pfam_size", default=4928, help="Total number of unique PFAM families in dataset", type=int)
parser.add_argument("--go_size", default=11567, help="Total number of unique Go annotations in dataset", type=int)
parser.add_argument("--epochs", default=10, help="Number of epochs", type=int)
parser.add_argument("--bsize", default=16, help="Batch size per GPU", type=int)
parser.add_argument("--lr", default=2e-4, help="Learning rate", type=float)
return parser.parse_args()
def train(args):
data = ProteinLMDataModule(args.ds, args.pfam_size, args.go_size, max_len=args.max_len)
tokenizer = Tokenizer()
model = ProteinLMModel(len(tokenizer.index), args.pfam_size, args.go_size, args.lr, max_len=args.max_len, bsize=args.bsize)
os.makedirs(args.output_dir, exist_ok=True)
pl.seed_everything(42)
wandb_logger = WandbLogger(project="ProteinLM")
checkpoint_callback = ModelCheckpoint(
monitor="valid_loss",
dirpath=args.output_dir,
filename="lmmodel-{epoch:02d}-{valid_loss:.2f}",
save_top_k=3,
mode="min",
)
trainer = pl.Trainer(gpus=args.gpus, precision=args.precision,
accelerator=args.accelerator,max_epochs=args.epochs,
logger=wandb_logger,callbacks=[checkpoint_callback],
)
trainer.fit(model, data)
if __name__ == '__main__':
args = parse_arguments()
train(args)
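# Example invocation (hypothetical dataset path and hardware; all flags are defined in parse_arguments above):
#   python lmtrainer.py --ds data/ur90_small --gpus 1 --bsize 16 --epochs 10 --max_len 512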
| rgn2-replica-main | scripts/lmtrainer.py |
# Author: Eirc Alcaide (@hypnopump)
import os
import re
import numpy as np
import torch
# process
import argparse
import joblib
from tqdm import tqdm
# custom
import esm
import sidechainnet
import mp_nerf
from rgn2_replica import *
from rgn2_replica.rosetta_refine import *
from rgn2_replica.utils import seqs_from_fasta
from rgn2_replica.embedders import get_embedder
from rgn2_replica.rgn2_trainers import infer_from_seqs
if __name__ == "__main__":
    # !python rgn2_predict_fold.py --input proteins.fasta --model ../rgn2_models/baseline_run@_125K.pt --device 2
parser = argparse.ArgumentParser()
# inputs
parser.add_argument("--input", help="FASTA or MultiFASTA with protein sequences to predict")
parser.add_argument("--batch_size", type=int, default=1, help="batch size for prediction")
# model
parser.add_argument("--embedder_model", type=str, help="esm1b")
parser.add_argument("--model", type=str, help="Model file for prediction")
parser.add_argument("--rosetta_refine", type=int, default=0, help="refine output with Rosetta. 0 for no refinement")
parser.add_argument("--rosetta_relax", type=int, default=0, help="relax output with Rosetta. 0 for no relax.")
parser.add_argument("--coord_constraint", type=float, default=1.0, help="constraint for Rosetta relax. higher=stricter.")
parser.add_argument("--recycle", default=10, help="Recycling iterations")
parser.add_argument("--device", default="cpu", help="['cpu', 'cuda:0', 'cuda:1', ...], cpu is slow!")
# outputs
parser.add_argument("--output_path", type=str, default=None, # prot_id.fasta -> prot_id_0.fasta,
help="path for output .pdb files. Defaults to input name + seq num")
args = parser.parse_args()
# mod parsed args
if args.output_path is None:
args.output_path = args.input.replace(".fasta", "_")
# get sequences
seq_list, seq_names = seqs_from_fasta(args.input, names=True)
# predict structures
model = RGN2_Naive(
layers = 2,
emb_dim = args.emb_dim+4,
hidden = 1024,
bidirectional = True,
mlp_hidden = [128, 4],
act="silu",
layer_type="LSTM",
input_dropout=0.5,
angularize=False,
).to(args.device)
model.load_state_dict(torch.load(args.model))
model = model.eval()
    # Load embedder model (e.g. ESM-1b)
    embedder = get_embedder(args, args.device)
# batch wrapper
pred_dict = {}
num_batches = len(seq_list) // args.batch_size + \
int(bool(len(seq_list) % args.batch_size))
for i in range( num_batches ):
aux = infer_from_seqs(
seq_list[args.batch_size*i : args.batch_size*(i+1)],
model = model,
embedder = embedder,
recycle_func=lambda x: int(args.recycle),
device=args.device
)
for k,v in aux.items():
try: pred_dict[k] += v
except KeyError: pred_dict[k] = v
# save structures
out_files = []
for i, seq in enumerate(seq_list):
struct_pred = sidechainnet.StructureBuilder(
pred_dict["int_seq"][i].cpu(),
crd = pred_dict["coords"][i].reshape(-1, 3).cpu()
)
out_files.append( args.output_path+str(i)+"_"+seq_names[i]+".pdb" )
struct_pred.to_pdb( out_files[-1] )
print("Saved", out_files[-1])
# refine structs
if args.rosetta_refine:
from typing import Optional
import pyrosetta
for i, seq in enumerate(seq_list):
# only refine
if args.rosetta_relax == 0:
quick_refine(
in_pdb = out_files[i],
out_pdb = out_files[i][:-4]+"_refined.pdb",
min_iter = args.rosetta_refine
)
# refine and relax
else:
relax_refine(
out_files[i],
out_pdb=out_files[i][:-4]+"_refined_relaxed.pdb",
min_iter = args.rosetta_refine,
relax_iter = args.rosetta_relax,
coord_constraint = args.coord_constraint,
)
print(out_files[i], "was refined successfully")
print("All tasks done. Exiting...")
| rgn2-replica-main | scripts/rgn2_predict_fold.py |
import os
import time
import argparse
import random
import numpy as np
import wandb
import torch
import esm
import sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary as VOCAB
# IMPORTED ALSO IN LATER MODULES
VOCAB = VOCAB()
import mp_nerf
from rgn2_replica.rgn2_trainers import *
from rgn2_replica.embedders import *
from rgn2_replica import set_seed, RGN2_Naive
def parse_arguments():
parser = argparse.ArgumentParser(description='Train RGN2 model')
# logging
parser.add_argument("--device", help="Device ('cpu', cuda:0', ...)", type=str, required=True)
parser.add_argument("--wb_proj", help="W & B project name", type=str, default=None)
parser.add_argument("--wb_entity", help="W & B entity", type=str, default=None)
parser.add_argument("--run_name", help="Experiment name", type=str, required=True)
# run handling
parser.add_argument("--resume_name", help="model path to load and resume", type=str, default=None)
parser.add_argument("--resume_iters", help="num of iters to resume training at", type=int, default=0)
# data params
parser.add_argument("--min_len", help="Min seq len, for train", type=int, default=0)
parser.add_argument("--min_len_valid", help="Min seq len, for valid", type=int, default=0)
parser.add_argument("--max_len", help="Max seq len", type=int, default=512)
parser.add_argument("--casp_version", help="SCN dataset version", type=int, default=12)
parser.add_argument("--scn_thinning", help="SCN dataset thinning", type=int, default=90)
parser.add_argument("--xray", help="only use xray structures", type=bool, default=0)
# model params
parser.add_argument("--embedder_model", help="Embedding model to use", default='esm1b')
parser.add_argument("--num_layers", help="num rnn layers", type=int, default=2)
parser.add_argument("--emb_dim", help="embedding dimension", type=int, default=1280)
parser.add_argument("--hidden", help="hidden dimension", type=int, default=1024)
parser.add_argument("--act", help="hideen activation", type=str, default="silu")
parser.add_argument("--layer_type", help="rnn layer type", type=str, default="LSTM")
parser.add_argument("--input_dropout", help="input dropout", type=float, default=0.5)
parser.add_argument("--bidirectional", help="bidirectionality", type=bool, default=0)
parser.add_argument("--angularize", help="angularization units. 0 for reg", type=int, default=0)
parser.add_argument("--num_recycles_train", type=int, default=3,
help="number of recycling iters. set to 1 to speed training.",)
parser.add_argument("--seed", help="Random seed", default=42)
return parser.parse_args()
def load_dataloader(args):
dataloaders = sidechainnet.load(
casp_version=args.casp_version,
thinning=args.scn_thinning,
with_pytorch="dataloaders",
batch_size=1, dynamic_batching=False
)
return dataloaders
def save_as_txt(*items, path):
if "/" in path:
folder = "/".join(path.split("/")[:-1])
os.makedirs(folder, exist_ok=True)
with open(path, "a") as f:
for item in items:
try:
for line in item:
f.write(str(line)+"\n")
except Exception as e:
print("Error in saving:", e)
f.write(str(item))
f.write("\n")
def init_wandb_config(args):
wandb.init(project=args.wb_proj, entity=args.wb_entity, name=args.run_name)
# 2. Save model inputs and hyperparameters
config = wandb.config
config.seed = args.seed
config.device = args.device
config.embedder_model = args.embedder_model
config.scn_version = str(args.casp_version)+"-"+str(args.scn_thinning)
config.min_len = args.min_len
config.max_len = args.max_len
config.xray = bool(args.xray)
# model hyperparams
config.num_layers = args.num_layers
config.emb_dim = args.emb_dim
config.hidden = args.hidden
config.mlp_hidden = [128, 4 if args.angularize == 0 else args.angularize] # 4 # 64
config.act = args.act # "silu"
config.layer_type = args.layer_type # "LSTM"
config.input_dropout = args.input_dropout
config.bidirectional = bool(args.bidirectional) # True
config.max_recycles_train = args.num_recycles_train # set up to 1 to speed things
config.angularize = bool(args.angularize)
return config
def init_and_train(args):
config = init_wandb_config(args)
dataloaders = load_dataloader(args)
print('loaded dataloaders')
embedder = get_embedder(config, config.device)
print('loaded embedder')
config = init_wandb_config(args)
results = run_train_schedule(dataloaders, embedder, config, args)
save_as_txt(
[args, config, *results],
path = "rgn2_models/"+wandb.run.name.replace("/", "_")+"_logs.txt"
)
def run_train_schedule(dataloaders, embedder, config, args):
valid_log_acc = []
device = torch.device(config.device)
embedder = embedder.to(device)
set_seed(config.seed)
model = RGN2_Naive(layers=config.num_layers,
emb_dim=config.emb_dim+4,
hidden=config.hidden,
bidirectional=config.bidirectional,
mlp_hidden=config.mlp_hidden,
act=config.act,
layer_type=config.layer_type,
input_dropout=config.input_dropout,
angularize=config.angularize,
).to(device)
if args.resume_name is not None:
model.load_state_dict(torch.load(args.resume_name))
# 3. Log gradients and model parameters
wandb.watch(model)
steps = get_training_schedule(args)
resume = True # declare new optim
for i, (batch_num, ckpt, lr, batch_size, max_len, clip, loss_f, seed) in enumerate(steps):
# reconfig batch otpions
wandb.log({
'learning_rate': lr,
'batch_size': batch_size
}, commit=False)
if sum([steps[j][0] for j in range(i)]) < args.resume_iters: continue
if resume:
if seed is not None:
set_seed(seed)
get_prot_ = mp_nerf.utils.get_prot(
dataloader_=dataloaders,
vocab_=VOCAB,
min_len=config.min_len, max_len=max_len, # MAX_LEN,
verbose=False, subset="train",
xray_filter=config.xray,
)
optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)
resume = False
else:
for g in optimizer.param_groups:
g['lr'] = lr
# train
metrics_stuff = train(
get_prot_=get_prot_,
steps=batch_num,
model=model,
embedder=embedder,
optim=optimizer,
loss_f=loss_f, # + 0.005 * metrics['drmsd'].mean()",
clip=clip,
accumulate_every=batch_size,
log_every=4,
seed=seed,
recycle_func=lambda x: random.randint(1, config.max_recycles_train), # 1
wandbai=True,
)
metric = np.mean([x["drmsd"] for x in metrics_stuff[0][-5*batch_size:]])
print("\nCheckpoint {0} @ {1}, pass @ {2}. Metrics mean train = {1}\n".format(
i, ckpt, metric, metrics_stuff[-1]
))
# save
os.makedirs('rgn2_models', exist_ok=True)
save_path = "rgn2_models/"+wandb.run.name.replace("/", "_")+"@_{0}K.pt".format(
sum(p[0] for p in steps[:i+1]) // 1000
)
torch.save(model.state_dict(), save_path)
## VALIDATING
for valid_set in [10, 20, 30, 40, 50, 70, 90]:
print("Validating "+str(valid_set))
tic = time.time()
get_prot_valid_ = mp_nerf.utils.get_prot(
dataloader_=dataloaders,
vocab_=VOCAB, # mp_nerf.utils.
min_len=args.min_len_valid, max_len=max_len,
verbose=False, subset="valid-"+str(valid_set)
)
# get num of unique, full-masked proteins
seqs = []
            for prot_args in dataloaders["valid-"+str(valid_set)].dataset: # don't clobber the outer step index `i`
# (id, int_seq, mask, ... , str_seq)
length = len(prot_args[-1])
if args.min_len_valid < length < max_len and sum( prot_args[2] ) == length:
seqs.append( prot_args[-1] )
metrics_stuff_eval = predict(
get_prot_= get_prot_valid_,
steps = len(set(seqs)), # 24 for MIN_LEN=70
model = model,
embedder = embedder,
return_preds = True,
log_every = 4,
accumulate_every = len(set(seqs)),
seed = 0, # 42
mode = "fast_test", # "test" # "test" is for AR, "fast_test" is for iterative
recycle_func = lambda x: 10, # 5 # 3 # 2
wandbai = False,
)
preds_list_eval, metrics_list_eval, metrics_stats_eval = metrics_stuff_eval
print("\n", "Eval Results:", sep="")
for k,v in metrics_stats_eval.items():
offset = " " * ( max(len(ki) for ki in metrics_stats_eval.keys()) - len(k) )
print(k + offset, ":", v)
print("\n")
print("Time taken: ", time.time()-tic, "\n")
# save logs to compare runs - wandb not enough
valid_log_acc.append(metrics_stats_eval)
# ABORT OR CONTINUE: mean of last 5 batches below ckpt
if metric > ckpt:
print("ABORTING")
print("Didn't pass ckpt {0} @ drmsd = {1}, but instead drmsd = {2}".format(
i, ckpt, metric
))
break
os.makedirs('rgn2_models', exist_ok=True)
save_path = "rgn2_models/"+wandb.run.name.replace("/", "_")+"@_{0}K.pt".format(
sum(p[0] for p in steps[:i+1]) // 1000
)
torch.save(model.state_dict(), save_path)
### TEST
tic = time.time()
get_prot_test_ = mp_nerf.utils.get_prot(
dataloader_=dataloaders,
vocab_=VOCAB, # mp_nerf.utils.
min_len=args.min_len_valid, max_len=max_len,
verbose=False, subset="test"
)
# get num of unique, full-masked proteins
seqs = []
for i, prot_args in enumerate(dataloaders["test"].dataset):
# (id, int_seq, mask, ... , str_seq)
length = len(prot_args[-1])
if args.min_len_valid < length < max_len and sum( prot_args[2] ) == length:
seqs.append( prot_args[-1] )
metrics_stuff_test = predict(
get_prot_= get_prot_test_,
steps = len(set(seqs)), # 24 for MIN_LEN=70
model = model,
embedder = embedder,
return_preds = True,
log_every = 4,
accumulate_every = len(set(seqs)),
seed = 0, # 42
mode = "fast_test", # "test" # "test" is for AR, "fast_test" is for iterative
recycle_func = lambda x: 10, # 5 # 3 # 2
wandbai = False,
)
preds_list_test, metrics_list_test, metrics_stats_test = metrics_stuff_test
print("\n", "Test Results:", sep="")
for k,v in metrics_stats_test.items():
offset = " " * ( max(len(ki) for ki in metrics_stats_test.keys()) - len(k) )
print(k + offset, ":", v)
print("\n")
print("Time taken: ", time.time()-tic, "\n")
return metrics_stats_eval, valid_log_acc
def get_training_schedule(args):
loss_f = " metrics['drmsd'].mean() / len(infer['seq']) "
# steps, ckpt, lr , bs , max_len, clip, loss_f
return [[32000, 135 , 1e-3, 8 , args.max_len, None, loss_f, 42 , ],]
if __name__ == '__main__':
args = parse_arguments()
init_and_train(args)
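# Example invocation (hypothetical values; --device and --run_name are the only required flags):
#   python train_rgn2.py --device cuda:0 --run_name baseline_run --max_len 512 --num_recycles_train 3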
| rgn2-replica-main | scripts/train_rgn2.py |
from setuptools import setup, find_packages
setup(
name = 'pi-gan-pytorch',
packages = find_packages(),
version = '0.0.11',
license='MIT',
description = 'π-GAN - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/pi-gan-pytorch',
keywords = [
'artificial intelligence',
'generative adversarial network'
],
install_requires=[
'einops>=0.3',
'pillow',
'torch>=1.6',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| pi-GAN-pytorch-main | setup.py |
# taken and modified from https://colab.research.google.com/drive/1rO8xo0TemN67d4mTpakrKrLp03b9bgCX#scrollTo=JovhcSy1NIhr
# will need to be refactored from 3d input to 5d (with ray direction)
import torch
import torch.nn.functional as F
from einops import repeat, rearrange
def meshgrid_xy(tensor1, tensor2):
ii, jj = torch.meshgrid(tensor1, tensor2)
return ii.transpose(-1, -2), jj.transpose(-1, -2)
def cumprod_exclusive(tensor):
cumprod = torch.cumprod(tensor, dim = -1)
cumprod = torch.roll(cumprod, 1, -1)
cumprod[..., 0] = 1.
return cumprod
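# a quick illustration of the exclusive cumulative product above (values follow from the definition):
# cumprod_exclusive(torch.tensor([2., 3., 4.])) -> tensor([1., 2., 6.])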
def get_ray_bundle(height, width, focal_length, tform_cam2world):
ii, jj = meshgrid_xy(
torch.arange(width).to(tform_cam2world),
torch.arange(height).to(tform_cam2world)
)
directions = torch.stack([(ii - width * .5) / focal_length,
-(jj - height * .5) / focal_length,
-torch.ones_like(ii)
], dim=-1)
ray_directions = torch.sum(directions[..., None, :] * tform_cam2world[:3, :3], dim=-1)
ray_origins = tform_cam2world[:3, -1].expand(ray_directions.shape)
return ray_origins, ray_directions
def compute_query_points_from_rays(
ray_origins,
ray_directions,
near_thresh,
far_thresh,
num_samples,
randomize = True
):
depth_values = torch.linspace(near_thresh, far_thresh, num_samples).to(ray_origins)
if randomize is True:
noise_shape = list(ray_origins.shape[:-1]) + [num_samples]
depth_values = depth_values \
+ torch.rand(noise_shape).to(ray_origins) * (far_thresh
- near_thresh) / num_samples
query_points = ray_origins[..., None, :] + ray_directions[..., None, :] * depth_values[..., :, None]
return query_points, depth_values
def render_volume_density(
radiance_field,
ray_origins,
depth_values
):
sigma_a = F.relu(radiance_field[..., 3])
rgb = torch.sigmoid(radiance_field[..., :3])
one_e_10 = torch.tensor([1e10], dtype=ray_origins.dtype, device=ray_origins.device)
dists = torch.cat((depth_values[..., 1:] - depth_values[..., :-1],
one_e_10.expand(depth_values[..., :1].shape)), dim=-1)
alpha = 1. - torch.exp(-sigma_a * dists)
weights = alpha * cumprod_exclusive(1. - alpha + 1e-10)
rgb_map = (weights[..., None] * rgb).sum(dim=-2)
depth_map = (weights * depth_values).sum(dim=-1)
acc_map = weights.sum(-1)
return rgb_map, depth_map, acc_map
def get_image_from_nerf_model(
model,
latents,
height,
width,
focal_length = 140,
tform_cam2world = torch.eye(4),
near_thresh = 2.,
far_thresh = 6.,
depth_samples_per_ray = 32
):
tform_cam2world = tform_cam2world.to(latents)
ray_origins, ray_directions = get_ray_bundle(height, width, focal_length,
tform_cam2world)
query_points, depth_values = compute_query_points_from_rays(
ray_origins, ray_directions, near_thresh, far_thresh, depth_samples_per_ray
)
flattened_query_points = query_points.reshape((-1, 3))
images = []
for latent in latents.unbind(0):
predictions = []
predictions.append(model(latent, flattened_query_points))
radiance_field_flattened = torch.cat(predictions, dim=0)
unflattened_shape = list(query_points.shape[:-1]) + [4]
radiance_field = torch.reshape(radiance_field_flattened, unflattened_shape)
rgb_predicted, _, _ = render_volume_density(radiance_field, ray_origins, depth_values)
image = rearrange(rgb_predicted, 'h w c -> c h w')
images.append(image)
return torch.stack(images)
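# A small usage sketch (not part of the original file). Any callable mapping a latent plus an
# (N, 3) batch of points to (N, 4) rgb+sigma predictions can stand in for `model`; the lambda
# below is a hypothetical placeholder just to show the expected shapes:
# model = lambda latent, points: torch.randn(points.shape[0], 4)
# latents = torch.randn(2, 256)
# images = get_image_from_nerf_model(model, latents, height=64, width=64)  # (2, 3, 64, 64)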
| pi-GAN-pytorch-main | pi_gan_pytorch/nerf.py |
from pi_gan_pytorch.pi_gan_pytorch import Generator, Discriminator, piGAN, Trainer
| pi-GAN-pytorch-main | pi_gan_pytorch/__init__.py |
import math
from pathlib import Path
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
from torch.utils.data import Dataset, DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR
from tqdm import trange
from PIL import Image
import torchvision
from torchvision.utils import save_image
import torchvision.transforms as T
from pi_gan_pytorch.coordconv import CoordConv
from pi_gan_pytorch.nerf import get_image_from_nerf_model
from einops import rearrange, repeat
assert torch.cuda.is_available(), 'You need to have an Nvidia GPU with CUDA installed.'
# helper
def exists(val):
return val is not None
def leaky_relu(p = 0.2):
return nn.LeakyReLU(p)
def to_value(t):
return t.clone().detach().item()
def get_module_device(module):
return next(module.parameters()).device
# losses
def gradient_penalty(images, output, weight = 10):
batch_size, device = images.shape[0], images.device
gradients = torch_grad(outputs=output, inputs=images,
grad_outputs=torch.ones(output.size(), device=device),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradients = gradients.reshape(batch_size, -1)
l2 = ((gradients.norm(2, dim = 1) - 1) ** 2).mean()
return weight * l2
# sin activation
class Sine(nn.Module):
def __init__(self, w0 = 1.):
super().__init__()
self.w0 = w0
def forward(self, x):
return torch.sin(self.w0 * x)
# siren layer
class Siren(nn.Module):
def __init__(self, dim_in, dim_out, w0 = 1., c = 6., is_first = False, use_bias = True, activation = None):
super().__init__()
self.dim_in = dim_in
self.is_first = is_first
weight = torch.zeros(dim_out, dim_in)
bias = torch.zeros(dim_out) if use_bias else None
self.init_(weight, bias, c = c, w0 = w0)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias) if use_bias else None
self.activation = Sine(w0) if activation is None else activation
def init_(self, weight, bias, c, w0):
dim = self.dim_in
w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)
weight.uniform_(-w_std, w_std)
if bias is not None:
bias.uniform_(-w_std, w_std)
def forward(self, x, gamma = None, beta = None):
out = F.linear(x, self.weight, self.bias)
# FiLM modulation
if exists(gamma):
out = out * gamma
if exists(beta):
out = out + beta
out = self.activation(out)
return out
# mapping network
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, lr_mul = 0.1, bias = True):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim))
self.lr_mul = lr_mul
def forward(self, input):
return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul)
class MappingNetwork(nn.Module):
def __init__(self, *, dim, dim_out, depth = 3, lr_mul = 0.1):
super().__init__()
layers = []
for i in range(depth):
layers.extend([EqualLinear(dim, dim, lr_mul), leaky_relu()])
self.net = nn.Sequential(*layers)
self.to_gamma = nn.Linear(dim, dim_out)
self.to_beta = nn.Linear(dim, dim_out)
def forward(self, x):
x = F.normalize(x, dim = -1)
x = self.net(x)
return self.to_gamma(x), self.to_beta(x)
# siren network
class SirenNet(nn.Module):
def __init__(self, dim_in, dim_hidden, dim_out, num_layers, w0 = 1., w0_initial = 30., use_bias = True, final_activation = None):
super().__init__()
self.layers = nn.ModuleList([])
for ind in range(num_layers):
is_first = ind == 0
layer_w0 = w0_initial if is_first else w0
layer_dim_in = dim_in if is_first else dim_hidden
self.layers.append(Siren(
dim_in = layer_dim_in,
dim_out = dim_hidden,
w0 = layer_w0,
use_bias = use_bias,
is_first = is_first
))
self.last_layer = Siren(dim_in = dim_hidden, dim_out = dim_out, w0 = w0, use_bias = use_bias, activation = final_activation)
def forward(self, x, gamma, beta):
for layer in self.layers:
x = layer(x, gamma, beta)
return self.last_layer(x)
# generator
class SirenGenerator(nn.Module):
def __init__(
self,
*,
dim,
dim_hidden,
siren_num_layers = 8
):
super().__init__()
self.mapping = MappingNetwork(
dim = dim,
dim_out = dim_hidden
)
self.siren = SirenNet(
dim_in = 3,
dim_hidden = dim_hidden,
dim_out = dim_hidden,
num_layers = siren_num_layers
)
self.to_alpha = nn.Linear(dim_hidden, 1)
self.to_rgb_siren = Siren(
dim_in = dim_hidden,
dim_out = dim_hidden
)
self.to_rgb = nn.Linear(dim_hidden, 3)
def forward(self, latent, coors, batch_size = 8192):
gamma, beta = self.mapping(latent)
outs = []
for coor in coors.split(batch_size):
gamma_, beta_ = map(lambda t: rearrange(t, 'n -> () n'), (gamma, beta))
x = self.siren(coor, gamma_, beta_)
alpha = self.to_alpha(x)
x = self.to_rgb_siren(x, gamma, beta)
rgb = self.to_rgb(x)
out = torch.cat((rgb, alpha), dim = -1)
outs.append(out)
return torch.cat(outs)
class Generator(nn.Module):
def __init__(
self,
*,
image_size,
dim,
dim_hidden,
siren_num_layers
):
super().__init__()
self.dim = dim
self.image_size = image_size
self.nerf_model = SirenGenerator(
dim = dim,
dim_hidden = dim_hidden,
siren_num_layers = siren_num_layers
)
def set_image_size(self, image_size):
self.image_size = image_size
def forward(self, latents):
image_size = self.image_size
device, b = latents.device, latents.shape[0]
generated_images = get_image_from_nerf_model(
self.nerf_model,
latents,
image_size,
image_size
)
return generated_images
# discriminator
class DiscriminatorBlock(nn.Module):
def __init__(self, dim, dim_out):
super().__init__()
self.res = CoordConv(dim, dim_out, kernel_size = 1, stride = 2)
self.net = nn.Sequential(
CoordConv(dim, dim_out, kernel_size = 3, padding = 1),
leaky_relu(),
CoordConv(dim_out, dim_out, kernel_size = 3, padding = 1),
leaky_relu()
)
self.down = nn.AvgPool2d(2)
def forward(self, x):
res = self.res(x)
x = self.net(x)
x = self.down(x)
x = x + res
return x
class Discriminator(nn.Module):
def __init__(
self,
image_size,
init_chan = 64,
max_chan = 400,
init_resolution = 32,
add_layer_iters = 10000
):
super().__init__()
resolutions = math.log2(image_size)
assert resolutions.is_integer(), 'image size must be a power of 2'
assert math.log2(init_resolution).is_integer(), 'initial resolution must be power of 2'
resolutions = int(resolutions)
layers = resolutions - 1
chans = list(reversed(list(map(lambda t: 2 ** (11 - t), range(layers)))))
chans = list(map(lambda n: min(max_chan, n), chans))
chans = [init_chan, *chans]
final_chan = chans[-1]
self.from_rgb_layers = nn.ModuleList([])
self.layers = nn.ModuleList([])
self.image_size = image_size
self.resolutions = list(map(lambda t: 2 ** (7 - t), range(layers)))
for resolution, in_chan, out_chan in zip(self.resolutions, chans[:-1], chans[1:]):
from_rgb_layer = nn.Sequential(
CoordConv(3, in_chan, kernel_size = 1),
leaky_relu()
) if resolution >= init_resolution else None
self.from_rgb_layers.append(from_rgb_layer)
self.layers.append(DiscriminatorBlock(
dim = in_chan,
dim_out = out_chan
))
self.final_conv = CoordConv(final_chan, 1, kernel_size = 2)
self.add_layer_iters = add_layer_iters
self.register_buffer('alpha', torch.tensor(0.))
self.register_buffer('resolution', torch.tensor(init_resolution))
self.register_buffer('iterations', torch.tensor(0.))
def increase_resolution_(self):
if self.resolution >= self.image_size:
return
self.alpha += self.alpha + (1 - self.alpha)
self.iterations.fill_(0.)
self.resolution *= 2
def update_iter_(self):
self.iterations += 1
self.alpha -= (1 / self.add_layer_iters)
self.alpha.clamp_(min = 0.)
def forward(self, img):
x = img
for resolution, from_rgb, layer in zip(self.resolutions, self.from_rgb_layers, self.layers):
if self.resolution < resolution:
continue
if self.resolution == resolution:
x = from_rgb(x)
if bool(resolution == (self.resolution // 2)) and bool(self.alpha > 0):
x_down = F.interpolate(img, scale_factor = 0.5)
x = x * (1 - self.alpha) + from_rgb(x_down) * self.alpha
x = layer(x)
out = self.final_conv(x)
return out
# pi-GAN class
class piGAN(nn.Module):
def __init__(
self,
*,
image_size,
dim,
init_resolution = 32,
generator_dim_hidden = 256,
siren_num_layers = 6,
add_layer_iters = 10000
):
super().__init__()
self.dim = dim
self.G = Generator(
image_size = image_size,
dim = dim,
dim_hidden = generator_dim_hidden,
siren_num_layers = siren_num_layers
)
self.D = Discriminator(
image_size = image_size,
add_layer_iters = add_layer_iters,
init_resolution = init_resolution
)
# dataset
def cycle(iterable):
while True:
for i in iterable:
yield i
def resize_to_minimum_size(min_size, image):
if max(*image.size) < min_size:
return torchvision.transforms.functional.resize(image, min_size)
return image
class ImageDataset(Dataset):
def __init__(
self,
folder,
image_size,
transparent = False,
aug_prob = 0.,
exts = ['jpg', 'jpeg', 'png']
):
super().__init__()
self.folder = folder
self.image_size = image_size
self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]
assert len(self.paths) > 0, f'No images were found in {folder} for training'
self.create_transform(image_size)
def create_transform(self, image_size):
self.transform = T.Compose([
T.Lambda(partial(resize_to_minimum_size, image_size)),
T.Resize(image_size),
T.CenterCrop(image_size),
T.ToTensor()
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
# trainer
def sample_generator(G, batch_size):
dim = G.dim
rand_latents = torch.randn(batch_size, dim).cuda()
return G(rand_latents)
class Trainer(nn.Module):
def __init__(
self,
*,
gan,
folder,
add_layers_iters = 10000,
batch_size = 8,
gradient_accumulate_every = 4,
sample_every = 100,
log_every = 10,
num_train_steps = 50000,
lr_gen = 5e-5,
lr_discr = 4e-4,
target_lr_gen = 1e-5,
target_lr_discr = 1e-4,
lr_decay_span = 10000
):
super().__init__()
gan.D.add_layer_iters = add_layers_iters
self.add_layers_iters = add_layers_iters
self.gan = gan.cuda()
self.optim_D = Adam(self.gan.D.parameters(), betas=(0, 0.9), lr = lr_discr)
self.optim_G = Adam(self.gan.G.parameters(), betas=(0, 0.9), lr = lr_gen)
D_decay_fn = lambda i: max(1 - i / lr_decay_span, 0) + (target_lr_discr / lr_discr) * min(i / lr_decay_span, 1)
G_decay_fn = lambda i: max(1 - i / lr_decay_span, 0) + (target_lr_gen / lr_gen) * min(i / lr_decay_span, 1)
self.sched_D = LambdaLR(self.optim_D, D_decay_fn)
self.sched_G = LambdaLR(self.optim_G, G_decay_fn)
self.iterations = 0
self.batch_size = batch_size
self.num_train_steps = num_train_steps
self.log_every = log_every
self.sample_every = sample_every
self.gradient_accumulate_every = gradient_accumulate_every
self.dataset = ImageDataset(folder = folder, image_size = gan.D.resolution.item())
self.dataloader = cycle(DataLoader(self.dataset, batch_size = batch_size, shuffle = True, drop_last = True))
self.last_loss_D = 0
self.last_loss_G = 0
def step(self):
D, G, batch_size, dim, accumulate_every = self.gan.D, self.gan.G, self.batch_size, self.gan.dim, self.gradient_accumulate_every
# set appropriate image size
if self.iterations % self.add_layers_iters == 0:
if self.iterations != 0:
D.increase_resolution_()
image_size = D.resolution.item()
G.set_image_size(image_size)
self.dataset.create_transform(image_size)
# gp
apply_gp = self.iterations % 4 == 0
# train discriminator
D.train()
loss_D = 0
for _ in range(accumulate_every):
images = next(self.dataloader)
images = images.cuda().requires_grad_()
real_out = D(images)
fake_imgs = sample_generator(G, batch_size)
fake_out = D(fake_imgs.clone().detach())
divergence = (F.relu(1 + real_out) + F.relu(1 - fake_out)).mean()
loss = divergence
if apply_gp:
gp = gradient_penalty(images, real_out)
self.last_loss_gp = to_value(gp)
loss = loss + gp
(loss / accumulate_every).backward()
loss_D += to_value(divergence) / accumulate_every
self.last_loss_D = loss_D
self.optim_D.step()
self.optim_D.zero_grad()
# train generator
G.train()
loss_G = 0
for _ in range(accumulate_every):
fake_out = sample_generator(G, batch_size)
loss = D(fake_out).mean()
(loss / accumulate_every).backward()
loss_G += to_value(loss) / accumulate_every
self.last_loss_G = loss_G
self.optim_G.step()
self.optim_G.zero_grad()
# update schedulers
self.sched_D.step()
self.sched_G.step()
self.iterations += 1
D.update_iter_()
def forward(self):
for _ in trange(self.num_train_steps):
self.step()
if self.iterations % self.log_every == 0:
print(f'I: {self.gan.D.resolution.item()} | D: {self.last_loss_D:.2f} | G: {self.last_loss_G:.2f} | GP: {self.last_loss_gp:.2f}')
if self.iterations % self.sample_every == 0:
i = self.iterations // self.sample_every
imgs = sample_generator(self.gan.G, 4)
imgs.clamp_(0., 1.)
save_image(imgs, f'./{i}.png', nrow = 2)
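# A usage sketch (hypothetical image folder; a CUDA device is required, as asserted at import time):
# gan = piGAN(image_size = 128, dim = 512)
# trainer = Trainer(gan = gan, folder = '/path/to/images', batch_size = 8)
# trainer()  # alternates discriminator / generator updates for num_train_steps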
| pi-GAN-pytorch-main | pi_gan_pytorch/pi_gan_pytorch.py |
# taken from
# https://github.com/mkocabas/CoordConv-pytorch/blob/master/CoordConv.py
import torch
import torch.nn as nn
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
"""
Args:
input_tensor: shape(batch, channel, x_dim, y_dim)
"""
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
yy_channel = torch.arange(y_dim).repeat(1, x_dim, 1).transpose(1, 2)
xx_channel = xx_channel.float() / (x_dim - 1)
yy_channel = yy_channel.float() / (y_dim - 1)
xx_channel = xx_channel * 2 - 1
yy_channel = yy_channel * 2 - 1
xx_channel = xx_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
yy_channel = yy_channel.repeat(batch_size, 1, 1, 1).transpose(2, 3)
ret = torch.cat([
input_tensor,
xx_channel.type_as(input_tensor),
yy_channel.type_as(input_tensor)], dim=1)
if self.with_r:
rr = torch.sqrt(torch.pow(xx_channel.type_as(input_tensor) - 0.5, 2) + torch.pow(yy_channel.type_as(input_tensor) - 0.5, 2))
ret = torch.cat([ret, rr], dim=1)
return ret
class CoordConv(nn.Module):
def __init__(self, in_channels, out_channels, with_r=False, **kwargs):
super().__init__()
self.addcoords = AddCoords(with_r=with_r)
in_size = in_channels+2
if with_r:
in_size += 1
self.conv = nn.Conv2d(in_size, out_channels, **kwargs)
def forward(self, x):
ret = self.addcoords(x)
ret = self.conv(ret)
        return ret
 | pi-GAN-pytorch-main | pi_gan_pytorch/coordconv.py |
from setuptools import setup, find_packages
setup(
name = 'jax2torch',
packages = find_packages(exclude=[]),
version = '0.0.7',
license='MIT',
description = 'Jax 2 Torch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/jax2torch',
keywords = [
'jax',
'pytorch'
],
install_requires=[
'torch>=1.6',
'jax>=0.2.20'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| jax2torch-main | setup.py |
from jax2torch.jax2torch import jax2torch
| jax2torch-main | jax2torch/__init__.py |
# https://gist.github.com/mattjj/e8b51074fed081d765d2f3ff90edf0e9
import torch
from torch.utils import dlpack as torch_dlpack
import jax
from jax import dlpack as jax_dlpack
import jax.numpy as jnp
from jax.tree_util import tree_map
from inspect import signature
from functools import wraps
def j2t(x_jax):
x_torch = torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(x_jax))
return x_torch
def t2j(x_torch):
x_torch = x_torch.contiguous() # https://github.com/google/jax/issues/8082
x_jax = jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(x_torch))
return x_jax
def tree_t2j(x_torch):
return tree_map(lambda t: t2j(t) if isinstance(t, torch.Tensor) else t, x_torch)
def tree_j2t(x_jax):
return tree_map(lambda t: j2t(t) if isinstance(t, jnp.ndarray) else t, x_jax)
def jax2torch(fn):
@wraps(fn)
def inner(*args, **kwargs):
class JaxFun(torch.autograd.Function):
@staticmethod
def forward(ctx, *args):
args = tree_t2j(args)
y_, ctx.fun_vjp = jax.vjp(fn, *args)
return tree_j2t(y_)
@staticmethod
def backward(ctx, *grad_args):
grad_args = tree_t2j(grad_args) if len(grad_args) > 1 else t2j(grad_args[0])
grads = ctx.fun_vjp(grad_args)
grads = tuple(map(lambda t: t if isinstance(t, jnp.ndarray) else None, grads))
return tree_j2t(grads)
sig = signature(fn)
bound = sig.bind(*args, **kwargs)
bound.apply_defaults()
return JaxFun.apply(*bound.arguments.values())
return inner
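# A usage sketch (names below are illustrative; assumes jax and torch can share the tensors via dlpack):
# import jax.numpy as jnp
# @jax2torch
# def jax_sum_squares(x, y):
#     return jnp.sum(x ** 2) + jnp.sum(y ** 2)
# x = torch.randn(8, requires_grad = True)
# y = torch.randn(8, requires_grad = True)
# out = jax_sum_squares(x, y)  # comes back as a torch tensor
# out.backward()               # gradients flow through jax.vjp into x.grad / y.grad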
| jax2torch-main | jax2torch/jax2torch.py |
from setuptools import setup, find_packages
setup(
name = 'x-unet',
packages = find_packages(exclude=[]),
version = '0.3.1',
license='MIT',
description = 'X-Unet',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/x-unet',
keywords = [
'artificial intelligence',
'deep learning',
'biomedical segmentation',
'medical deep learning',
'unets',
],
install_requires=[
'beartype',
'einops>=0.4',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| x-unet-main | setup.py |
from x_unet.x_unet import XUnet, NestedResidualUnet
 | x-unet-main | x_unet/__init__.py |
from functools import partial
import math
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from beartype import beartype
from beartype.typing import Tuple, Union, Optional
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def is_power_two(n):
return math.log2(n).is_integer()
def divisible_by(num, denom):
return (num % denom) == 0
def cast_tuple(val, length = None):
if isinstance(val, list):
val = tuple(val)
output = val if isinstance(val, tuple) else ((val,) * default(length, 1))
if exists(length):
assert len(output) == length
return output
# helper classes
def Upsample(dim, dim_out):
return nn.ConvTranspose3d(dim, dim_out, (1, 4, 4), (1, 2, 2), (0, 1, 1))
def Downsample(dim, dim_out):
return nn.Sequential(
Rearrange('b c f (h s1) (w s2) -> b (c s1 s2) f h w', s1 = 2, s2 = 2),
nn.Conv3d(dim * 4, dim_out, 1)
)
# normalization
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(1, dim, 1, 1, 1))
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) / (var + eps).sqrt() * self.gamma
class WeightStandardizedConv3d(nn.Conv3d):
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
weight = self.weight
mean = reduce(weight, 'o ... -> o 1 1 1 1', 'mean')
var = reduce(weight, 'o ... -> o 1 1 1 1', partial(torch.var, unbiased = False))
weight = (weight - mean) * (var + eps).rsqrt()
return F.conv3d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
# resnet blocks
class Block(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
weight_standardize = False,
frame_kernel_size = 1
):
super().__init__()
kernel_conv_kwargs = partial(kernel_and_same_pad, frame_kernel_size)
conv = nn.Conv3d if not weight_standardize else WeightStandardizedConv3d
self.proj = conv(dim, dim_out, **kernel_conv_kwargs(3, 3))
self.norm = nn.GroupNorm(groups, dim_out)
self.act = nn.SiLU()
def forward(self, x):
x = self.proj(x)
x = self.norm(x)
return self.act(x)
class ResnetBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
groups = 8,
frame_kernel_size = 1,
nested_unet_depth = 0,
nested_unet_dim = 32,
weight_standardize = False
):
super().__init__()
self.block1 = Block(dim, dim_out, groups = groups, weight_standardize = weight_standardize, frame_kernel_size = frame_kernel_size)
if nested_unet_depth > 0:
self.block2 = NestedResidualUnet(dim_out, depth = nested_unet_depth, M = nested_unet_dim, frame_kernel_size = frame_kernel_size, weight_standardize = weight_standardize, add_residual = True)
else:
self.block2 = Block(dim_out, dim_out, groups = groups, weight_standardize = weight_standardize, frame_kernel_size = frame_kernel_size)
self.res_conv = nn.Conv3d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x):
h = self.block1(x)
h = self.block2(h)
return h + self.res_conv(x)
# convnext 2
class GRN(nn.Module):
""" global response normalization, proposed in updated convnext paper """
def __init__(self, dim, eps = 1e-5):
super().__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.zeros(dim, 1, 1, 1))
self.bias = nn.Parameter(torch.zeros(dim, 1, 1, 1))
def forward(self, x):
spatial_l2_norm = x.norm(p = 2, dim = (2, 3, 4), keepdim = True)
        feat_norm = spatial_l2_norm / spatial_l2_norm.mean(dim = 1, keepdim = True).clamp(min = self.eps) # normalize by the mean response across channels, as in the convnext v2 GRN
return x * feat_norm * self.gamma + self.bias + x
class ConvNextBlock(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
mult = 2,
frame_kernel_size = 1,
nested_unet_depth = 0,
nested_unet_dim = 32
):
super().__init__()
kernel_conv_kwargs = partial(kernel_and_same_pad, frame_kernel_size)
self.ds_conv = nn.Conv3d(dim, dim, **kernel_conv_kwargs(7, 7), groups = dim)
inner_dim = dim_out * mult
self.net = nn.Sequential(
LayerNorm(dim),
nn.Conv3d(dim, inner_dim, **kernel_conv_kwargs(3, 3), groups = dim_out),
nn.GELU(),
GRN(inner_dim),
nn.Conv3d(inner_dim, dim_out, **kernel_conv_kwargs(3, 3), groups = dim_out)
)
self.nested_unet = NestedResidualUnet(dim_out, depth = nested_unet_depth, M = nested_unet_dim, add_residual = True) if nested_unet_depth > 0 else nn.Identity()
self.res_conv = nn.Conv3d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb = None):
h = self.ds_conv(x)
h = self.net(h)
h = self.nested_unet(h)
return h + self.res_conv(x)
# feedforward
def FeedForward(dim, mult = 4.):
inner_dim = int(dim * mult)
return Residual(nn.Sequential(
LayerNorm(dim),
nn.Conv3d(dim, inner_dim, 1, bias = False),
nn.GELU(),
LayerNorm(inner_dim), # properly credit assign normformer
nn.Conv3d(inner_dim, dim, 1, bias = False)
))
# attention
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 4,
dim_head = 64
):
super().__init__()
self.scale = dim_head ** -0.5
self.heads = heads
inner_dim = heads * dim_head
self.norm = LayerNorm(dim)
self.to_qkv = nn.Conv3d(dim, inner_dim * 3, 1, bias = False)
self.to_out = nn.Conv3d(inner_dim, dim, 1, bias = False)
def forward(self, x):
f, h, w = x.shape[-3:]
residual = x.clone()
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = 1)
q, k, v = map(lambda t: rearrange(t, 'b (h c) ... -> b h (...) c', h = self.heads), (q, k, v))
q = q * self.scale
sim = einsum('b h i d, b h j d -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h (f x y) d -> b (h d) f x y', f = f, x = h, y = w)
return self.to_out(out) + residual
class TransformerBlock(nn.Module):
def __init__(
self,
dim,
*,
depth,
**kwargs
):
super().__init__()
self.attn = Attention(dim, **kwargs)
        self.ff = FeedForward(dim)
def forward(self, x):
x = self.attn(x)
x = self.ff(x)
return x
class FeatureMapConsolidator(nn.Module):
def __init__(
self,
dim,
*,
dim_ins = tuple(),
dim_outs = tuple(),
resize_fmap_before = True,
conv_block_fn = None
):
super().__init__()
assert len(dim_ins) == len(dim_outs)
self.needs_consolidating = len(dim_ins) > 0
block_fn = default(conv_block_fn, Block)
self.fmap_convs = nn.ModuleList([block_fn(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])
self.resize_fmap_before = resize_fmap_before
self.final_dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)
def resize_fmaps(self, fmaps, height, width):
return [F.interpolate(fmap, (fmap.shape[-3], height, width)) for fmap in fmaps]
def forward(self, x, fmaps = None):
target_height, target_width = x.shape[-2:]
fmaps = default(fmaps, tuple())
if not self.needs_consolidating:
return x
if self.resize_fmap_before:
fmaps = self.resize_fmaps(fmaps, target_height, target_width)
outs = []
for fmap, conv in zip(fmaps, self.fmap_convs):
outs.append(conv(fmap))
if self.resize_fmap_before:
outs = self.resize_fmaps(outs, target_height, target_width)
return torch.cat((x, *outs), dim = 1)
# unet
def MaybeTuple(type):
return Union[type, Tuple[type, ...]]
def kernel_and_same_pad(*kernel_size):
paddings = tuple(map(lambda k: k // 2, kernel_size))
return dict(kernel_size = kernel_size, padding = paddings)
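# a quick illustration of the helper above (values follow directly from the definition):
# kernel_and_same_pad(1, 7, 7) -> {'kernel_size': (1, 7, 7), 'padding': (0, 3, 3)}  # "same" padding per axis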
class XUnet(nn.Module):
@beartype
def __init__(
self,
dim,
init_dim = None,
out_dim = None,
frame_kernel_size = 1,
dim_mults: MaybeTuple(int) = (1, 2, 4, 8),
num_blocks_per_stage: MaybeTuple(int) = (2, 2, 2, 2),
num_self_attn_per_stage: MaybeTuple(int) = (0, 0, 0, 1),
nested_unet_depths: MaybeTuple(int) = (0, 0, 0, 0),
nested_unet_dim = 32,
channels = 3,
use_convnext = False,
resnet_groups = 8,
consolidate_upsample_fmaps = True,
skip_scale = 2 ** -0.5,
weight_standardize = False,
attn_heads: MaybeTuple(int) = 8,
attn_dim_head: MaybeTuple(int) = 32
):
super().__init__()
self.train_as_images = frame_kernel_size == 1
self.skip_scale = skip_scale
self.channels = channels
init_dim = default(init_dim, dim)
self.init_conv = nn.Conv3d(channels, init_dim, **kernel_and_same_pad(frame_kernel_size, 7, 7))
dims = [init_dim, *map(lambda m: dim * m, dim_mults)]
in_out = list(zip(dims[:-1], dims[1:]))
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
num_resolutions = len(in_out)
# resnet or convnext
blocks = partial(ConvNextBlock, frame_kernel_size = frame_kernel_size) if use_convnext else partial(ResnetBlock, groups = resnet_groups, weight_standardize = weight_standardize, frame_kernel_size = frame_kernel_size)
# whether to use nested unet, as in unet squared paper
nested_unet_depths = cast_tuple(nested_unet_depths, num_resolutions)
# number of blocks per stage
num_blocks_per_stage = cast_tuple(num_blocks_per_stage, num_resolutions)
assert all([num_blocks > 0 for num_blocks in num_blocks_per_stage])
# number of self attention blocks per stage
num_self_attn_per_stage = cast_tuple(num_self_attn_per_stage, num_resolutions)
assert all([num_self_attn_blocks >= 0 for num_self_attn_blocks in num_self_attn_per_stage])
# attn kwargs
attn_heads = cast_tuple(attn_heads, num_resolutions)
attn_dim_head = cast_tuple(attn_dim_head, num_resolutions)
# modules for all layers
skip_dims = []
down_stage_parameters = [
in_out,
nested_unet_depths,
num_blocks_per_stage,
num_self_attn_per_stage,
attn_heads,
attn_dim_head
]
up_stage_parameters = [reversed(params[:-1]) for params in down_stage_parameters]
# downs
for ind, ((dim_in, dim_out), nested_unet_depth, num_blocks, self_attn_blocks, heads, dim_head) in enumerate(zip(*down_stage_parameters)):
is_last = ind >= (num_resolutions - 1)
skip_dims.append(dim_in)
self.downs.append(nn.ModuleList([
blocks(dim_in, dim_in, nested_unet_depth = nested_unet_depth, nested_unet_dim = nested_unet_dim),
nn.ModuleList([blocks(dim_in, dim_in, nested_unet_depth = nested_unet_depth, nested_unet_dim = nested_unet_dim) for _ in range(num_blocks - 1)]),
nn.ModuleList([TransformerBlock(dim_in, depth = self_attn_blocks, heads = heads, dim_head = dim_head) for _ in range(self_attn_blocks)]),
Downsample(dim_in, dim_out)
]))
# middle
mid_dim = dims[-1]
mid_nested_unet_depth = nested_unet_depths[-1]
self.mid = blocks(mid_dim, mid_dim, nested_unet_depth = mid_nested_unet_depth, nested_unet_dim = nested_unet_dim)
self.mid_attn = Attention(mid_dim, heads = attn_heads[-1], dim_head = attn_dim_head[-1])
self.mid_after = blocks(mid_dim, mid_dim, nested_unet_depth = mid_nested_unet_depth, nested_unet_dim = nested_unet_dim)
self.mid_upsample = Upsample(mid_dim, dims[-2])
# ups
for ind, ((dim_in, dim_out), nested_unet_depth, num_blocks, self_attn_blocks, heads, dim_head) in enumerate(zip(*up_stage_parameters)):
is_last = ind >= (num_resolutions - 1)
self.ups.append(nn.ModuleList([
blocks(dim_out + skip_dims.pop(), dim_out, nested_unet_depth = nested_unet_depth, nested_unet_dim = nested_unet_dim),
nn.ModuleList([blocks(dim_out, dim_out, nested_unet_depth = nested_unet_depth, nested_unet_dim = nested_unet_dim) for _ in range(num_blocks - 1)]),
nn.ModuleList([TransformerBlock(dim_out, depth = self_attn_blocks, heads = heads, dim_head = dim_head) for _ in range(self_attn_blocks)]),
Upsample(dim_out, dim_in) if not is_last else nn.Identity()
]))
out_dim = default(out_dim, channels)
if consolidate_upsample_fmaps:
self.consolidator = FeatureMapConsolidator(
dim,
dim_ins = tuple(map(lambda m: dim * m, dim_mults)),
dim_outs = (dim,) * len(dim_mults),
conv_block_fn = blocks
)
else:
self.consolidator = FeatureMapConsolidator(dim = dim)
final_dim_in = self.consolidator.final_dim_out
self.final_conv = nn.Sequential(
blocks(final_dim_in + dim, dim),
nn.Conv3d(dim, out_dim, **kernel_and_same_pad(frame_kernel_size, 3, 3))
)
def forward(self, x):
is_image = x.ndim == 4
# validations
assert not (is_image and not self.train_as_images), 'you specified a frame kernel size for the convolutions in this unet, but you are passing in images'
assert not (not is_image and self.train_as_images), 'you specified no frame kernel size dimension, yet you are passing in a video. fold the frame dimension into the batch'
# cast images to 1 framed video
if is_image:
x = rearrange(x, 'b c h w -> b c 1 h w')
# initial convolution
x = self.init_conv(x)
# residual
r = x.clone()
# downs and ups
down_hiddens = []
up_hiddens = []
for init_block, blocks, attn_blocks, downsample in self.downs:
x = init_block(x)
for block in blocks:
x = block(x)
for attn_block in attn_blocks:
x = attn_block(x)
down_hiddens.append(x)
x = downsample(x)
x = self.mid(x)
x = self.mid_attn(x) + x
x = self.mid_after(x)
up_hiddens.append(x)
x = self.mid_upsample(x)
for init_block, blocks, attn_blocks, upsample in self.ups:
x = torch.cat((x, down_hiddens.pop() * self.skip_scale), dim=1)
x = init_block(x)
for block in blocks:
x = block(x)
for attn_block in attn_blocks:
x = attn_block(x)
up_hiddens.insert(0, x)
x = upsample(x)
# consolidate feature maps
x = self.consolidator(x, up_hiddens)
# final residual
x = torch.cat((x, r), dim = 1)
# final convolution
out = self.final_conv(x)
if is_image:
out = rearrange(out, 'b c 1 h w -> b c h w')
return out
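# A usage sketch (not part of the original file; values are illustrative). With the default
# frame_kernel_size = 1 the unet accepts plain images, and the spatial size must be divisible
# enough for the chosen downsampling stages and nested unets:
# unet = XUnet(dim = 64, channels = 3, dim_mults = (1, 2, 4, 8), nested_unet_depths = (5, 4, 2, 1))
# img = torch.randn(1, 3, 256, 256)
# out = unet(img)  # (1, 3, 256, 256)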
# RSU
class PixelShuffleUpsample(nn.Module):
def __init__(
self,
dim,
dim_out = None,
scale_factor = 2
):
super().__init__()
self.scale_squared = scale_factor ** 2
dim_out = default(dim_out, dim)
conv = nn.Conv3d(dim, dim_out * self.scale_squared, 1)
self.net = nn.Sequential(
conv,
nn.SiLU(),
Rearrange('b (c r s) f h w -> b c f (h r) (w s)', r = scale_factor, s = scale_factor)
)
self.init_conv_(conv)
def init_conv_(self, conv):
o, i, *rest_dims = conv.weight.shape
conv_weight = torch.empty(o // self.scale_squared, i, *rest_dims)
nn.init.kaiming_uniform_(conv_weight)
conv_weight = repeat(conv_weight, 'o ... -> (o r) ...', r = self.scale_squared)
conv.weight.data.copy_(conv_weight)
nn.init.zeros_(conv.bias.data)
def forward(self, x):
x = self.net(x)
return x
class NestedResidualUnet(nn.Module):
def __init__(
self,
dim,
*,
depth,
M = 32,
frame_kernel_size = 1,
add_residual = False,
groups = 4,
skip_scale = 2 ** -0.5,
weight_standardize = False
):
super().__init__()
self.depth = depth
self.downs = nn.ModuleList([])
self.ups = nn.ModuleList([])
conv = WeightStandardizedConv3d if weight_standardize else nn.Conv3d
for ind in range(depth):
is_first = ind == 0
dim_in = dim if is_first else M
down = nn.Sequential(
conv(dim_in, M, (1, 4, 4), stride = (1, 2, 2), padding = (0, 1, 1)),
nn.GroupNorm(groups, M),
nn.SiLU()
)
up = nn.Sequential(
PixelShuffleUpsample(2 * M, dim_in),
nn.GroupNorm(groups, dim_in),
nn.SiLU()
)
self.downs.append(down)
self.ups.append(up)
self.mid = nn.Sequential(
conv(M, M, **kernel_and_same_pad(frame_kernel_size, 3, 3)),
nn.GroupNorm(groups, M),
nn.SiLU()
)
self.skip_scale = skip_scale
self.add_residual = add_residual
def forward(self, x, residual = None):
is_video = x.ndim == 5
if self.add_residual:
residual = default(residual, x.clone())
*_, h, w = x.shape
layers = len(self.ups)
for dim_name, size in (('height', h), ('width', w)):
assert divisible_by(size, 2 ** layers), f'{dim_name} dimension {size} must be divisible by {2 ** layers} ({layers} layers in nested unet)'
assert (size % (2 ** self.depth)) == 0, f'the unet has too much depth for the image {dim_name} ({size}) being passed in'
# hiddens
hiddens = []
# unet
for down in self.downs:
x = down(x)
hiddens.append(x.clone().contiguous())
x = self.mid(x)
for up in reversed(self.ups):
x = torch.cat((x, hiddens.pop() * self.skip_scale), dim = 1)
x = up(x)
# adding residual
if self.add_residual:
x = x + residual
x = F.silu(x)
return x
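# Illustrative usage sketch (assumed shapes; height and width must be divisible by 2 ** depth):
#   unet = NestedResidualUnet(dim = 64, depth = 3)
#   video = torch.randn(1, 64, 4, 64, 64)   # (batch, channels, frames, height, width)
#   out = unet(video)                        # same shape as the input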
| x-unet-main | x_unet/x_unet.py |
import distutils
import distutils.spawn
import os
import platform
import re
import shutil
import subprocess
import sys
import tarfile
import urllib.request
from distutils.version import LooseVersion
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext
# Taken from https://github.com/pytorch/pytorch/blob/master/tools/setup_helpers/env.py
def check_env_flag(name: str, default: str = "") -> bool:
return os.getenv(name, default).upper() in ["ON", "1", "YES", "TRUE", "Y"]
def get_build_type():
if check_env_flag("DEBUG"):
return "Debug"
elif check_env_flag("REL_WITH_DEB_INFO"):
return "RelWithDebInfo"
else:
return "Release"
def get_llvm():
# tries to find system LLVM
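    # if a matching llvm-config is already on the PATH (or we are on Windows), return
    # empty paths, presumably letting the CMake build locate LLVM on its own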
versions = ['-11.0', '-11', '-11-64']
supported = ['llvm-config{v}'.format(v=v) for v in versions]
paths = [distutils.spawn.find_executable(cfg) for cfg in supported]
paths = [p for p in paths if p is not None]
if paths:
return '', ''
if platform.system() == "Windows":
return '', ''
# download if nothing is installed
name = 'clang+llvm-11.0.1-x86_64-linux-gnu-ubuntu-16.04'
dir = os.path.join(os.environ["HOME"], ".triton", "llvm")
llvm_include_dir = '{dir}/{name}/include'.format(dir=dir, name=name)
llvm_library_dir = '{dir}/{name}/lib'.format(dir=dir, name=name)
if not os.path.exists(llvm_library_dir):
os.makedirs(dir, exist_ok=True)
try:
shutil.rmtree(os.path.join(dir, name))
except Exception:
pass
url = "https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.1/{name}.tar.xz".format(name=name)
print('downloading and extracting ' + url + '...')
ftpstream = urllib.request.urlopen(url)
file = tarfile.open(fileobj=ftpstream, mode="r|xz")
file.extractall(path=dir)
return llvm_include_dir, llvm_library_dir
class CMakeExtension(Extension):
def __init__(self, name, path, sourcedir=""):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
self.path = path
class CMakeBuild(build_ext):
user_options = build_ext.user_options + [('base-dir=', None, 'base directory of Triton')]
def initialize_options(self):
build_ext.initialize_options(self)
self.base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
def finalize_options(self):
build_ext.finalize_options(self)
def run(self):
try:
out = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " + ", ".join(e.name for e in self.extensions)
)
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r"version\s*([\d.]+)", out.decode()).group(1))
if cmake_version < "3.1.0":
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
llvm_include_dir, llvm_library_dir = get_llvm()
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.path)))
# create build directories
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
# python directories
python_include_dirs = [distutils.sysconfig.get_python_inc()]
cmake_args = [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir,
"-DBUILD_TUTORIALS=OFF",
"-DBUILD_PYTHON_MODULE=ON",
"-DLLVM_INCLUDE_DIRS=" + llvm_include_dir,
"-DLLVM_LIBRARY_DIR=" + llvm_library_dir,
# '-DPYTHON_EXECUTABLE=' + sys.executable,
# '-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON',
"-DPYTHON_INCLUDE_DIRS=" + ";".join(python_include_dirs)
]
# configuration
cfg = get_build_type()
build_args = ["--config", cfg]
if platform.system() == "Windows":
cmake_args += ["-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ["-A", "x64"]
build_args += ["--", "/m"]
else:
import multiprocessing
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", '-j' + str(2 * multiprocessing.cpu_count())]
env = os.environ.copy()
subprocess.check_call(["cmake", self.base_dir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=self.build_temp)
setup(
name="triton",
version="2.0.0",
author="Philippe Tillet",
author_email="phil@openai.com",
description="A language and compiler for custom Deep Learning operations",
long_description="",
packages=["triton", "triton/_C", "triton/language", "triton/tools", "triton/ops", "triton/ops/blocksparse"],
install_requires=[
"cmake",
"filelock",
"torch",
],
package_data={
"triton/ops": ["*.c"],
"triton/ops/blocksparse": ["*.c"],
"triton/language": ["*.bc"],
},
include_package_data=True,
ext_modules=[CMakeExtension("triton", "triton/_C/")],
cmdclass={"build_ext": CMakeBuild},
zip_safe=False,
# for PyPI
keywords=["Compiler", "Deep Learning"],
url="https://github.com/openai/triton/",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
],
extras_require={
"tests": [
"autopep8",
"flake8",
"isort",
"numpy",
"pytest",
"scipy>=1.7.1",
],
"tutorials": [
"matplotlib",
"pandas",
"tabulate",
],
},
)
| triton-master | python/setup.py |
import argparse
import inspect
import os
import sys
import triton
def run_all(result_dir, names):
if not os.path.exists(result_dir):
os.makedirs(result_dir)
for mod in os.listdir(os.path.dirname(os.path.realpath(__file__))):
# skip non python files
if not mod.endswith('.py'):
continue
# skip file not in provided names
if names and names not in mod:
continue
# skip files that don't start with 'bench_'
if not mod.startswith('bench_'):
continue
print(f'running {mod}...')
mod = __import__(os.path.splitext(mod)[0])
benchmarks = inspect.getmembers(mod, lambda x: isinstance(x, triton.testing.Mark))
for name, bench in benchmarks:
curr_dir = os.path.join(result_dir, mod.__name__.replace('bench_', ''))
if len(benchmarks) > 1:
curr_dir = os.path.join(curr_dir, name.replace('bench_', ''))
if not os.path.exists(curr_dir):
os.makedirs(curr_dir)
bench.run(save_path=curr_dir)
def main(args):
parser = argparse.ArgumentParser(description="Run the benchmark suite.")
parser.add_argument("-r", "--result-dir", type=str, default='results', required=False)
parser.add_argument("-n", "--names", type=str, default='', required=False)
parser.set_defaults(feature=False)
args = parser.parse_args(args)
run_all(args.result_dir, args.names)
if __name__ == '__main__':
main(sys.argv[1:])
| triton-master | python/bench/run.py |
import torch
import triton
# -------------------------------
# Matrix Multiplication
# -------------------------------
nt = {False: 'n', True: 't'}
square_confs = [
triton.testing.Benchmark(
x_names=['M', 'N', 'K'],
x_vals=[128, 256, 512, 1024, 2048, 3072, 4096, 6144],
line_arg='block',
line_vals=[16, 32, 64, 128],
line_names=['Block16', 'Block32', 'Block64', 'Block128'],
ylabel='TFLOPS',
plot_name=f'{op_mode}-{layout_mode}-square-{nt[AT]}{nt[BT]}',
args={'layout_mode': layout_mode, 'op_mode': op_mode,
'AT': AT, 'BT': BT, 'dtype': torch.float16, 'provider': 'triton'}
)
for AT in [False] for BT in [False]
for op_mode in ['dsd'] for layout_mode in ['dense']
]
@triton.testing.perf_report(square_confs)
def bench_matmul(M, N, K, block, layout_mode, op_mode, AT, BT, dtype, provider, warmup=100, rep=1000):
Z, H = 1, 1
make_layout = {
'tril': lambda H, M, N: torch.tril(torch.ones((H, M, N), dtype=torch.int64)),
'dense': lambda H, M, N: torch.ones(H, M, N, dtype=torch.int64),
}[layout_mode]
# create layout
shape = {'sdd': (M, N), 'dsd': (K, M) if AT else (M, K), 'dds': (N, K) if BT else (K, N)}[op_mode]
layout = make_layout(H, shape[0] // block, shape[1] // block)
    # create inputs
a = torch.randn((Z, H, K, M) if AT else (Z, H, M, K), dtype=dtype, device='cuda')
b = torch.randn((Z, H, N, K) if BT else (Z, H, K, N), dtype=dtype, device='cuda')
# create op
tflops = lambda ms: num_flops / ms * 1e3
if provider == 'triton':
op = triton.ops.blocksparse.matmul(layout, block, op_mode, device="cuda", trans_a=AT, trans_b=BT)
# inputs
a = triton.testing.sparsify_tensor(a, layout, block) if op_mode == 'dsd' else a
b = triton.testing.sparsify_tensor(b, layout, block) if op_mode == 'dds' else b
mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(a, b), warmup=warmup, rep=rep)
num_flops = {
'sdd': 2 * Z * K * float(layout.sum()) * block * block,
'dsd': 2 * Z * N * float(layout.sum()) * block * block,
'dds': 2 * Z * M * float(layout.sum()) * block * block
}[op_mode] * 1e-12
return tflops(mean_ms), tflops(min_ms), tflops(max_ms)
# -------------------------------
# Softmax
# -------------------------------
square_confs = [
triton.testing.Benchmark(
x_names=['M', 'N'],
x_vals=[128, 256, 512, 1024, 2048, 3072, 4096, 6144],
line_arg='block',
line_vals=[16, 32, 64],
line_names=['Block16', 'Block32', 'Block64'],
ylabel='GBPS',
plot_name=f'{layout_mode}-square',
args={'layout_mode': layout_mode, 'dtype': torch.float16, 'provider': 'triton'}
)
for layout_mode in ['dense', 'tril']
]
@triton.testing.perf_report(square_confs)
def bench_softmax(M, N, block, layout_mode, dtype, provider, warmup=10, rep=50):
Z, H = 1, 1
make_layout = {
'tril': lambda H, M, N: torch.tril(torch.ones((H, M, N), dtype=torch.int64)),
'dense': lambda H, M, N: torch.ones(H, M, N, dtype=torch.int64),
}[layout_mode]
layout = make_layout(H, M // block, N // block)
a = torch.randn((Z, H, M, N), dtype=dtype, device='cuda')
if provider == 'triton':
a = triton.testing.sparsify_tensor(a, layout, block)
op = triton.ops.blocksparse.softmax(layout, block, device="cuda")
gbps = lambda ms: (2 * a.numel() * a.element_size() * 1e-9) / (ms * 1e-3)
mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(a), warmup=warmup, rep=rep)
return gbps(mean_ms), gbps(min_ms), gbps(max_ms)
bench_matmul.run(print_data=True, show_plots=True)
| triton-master | python/bench/bench_blocksparse.py |
import torch
import triton
confs = [
triton.testing.Benchmark(
x_names=['N'],
x_vals=[128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192],
line_arg='provider',
line_vals=['triton', 'torch'],
line_names=['Triton', 'Torch'],
ylabel='GBPS',
plot_name=f'{mode}-2048',
args={'M': 2048, 'dtype': torch.float16, 'mode': mode}
)
for mode in ['forward', 'backward']
]
@triton.testing.perf_report(confs)
def bench_op(M, N, dtype, mode, provider):
# create inputs
x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)
idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')
num_gb = (2 * x.numel() * x.element_size() * 1e-9)
gbps = lambda ms: num_gb / ms * 1e3
# forward pass
op = {'torch': torch.nn.CrossEntropyLoss(reduction='none'),
'triton': triton.ops.cross_entropy}[provider]
if mode == 'forward':
mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(x, idx))
if mode == 'backward':
y = op(x, idx)
dy = torch.randn_like(y)
fn = lambda: y.backward(dy, retain_graph=True)
mean_ms, min_ms, max_ms = triton.testing.do_bench(fn, grad_to_none=[x])
return gbps(mean_ms), gbps(min_ms), gbps(max_ms)
if __name__ == '__main__':
bench_op.run(print_data=True)
| triton-master | python/bench/bench_cross_entropy.py |
import torch
import triton
def rounded_linspace(low, high, steps, div):
ret = torch.linspace(low, high, steps)
ret = torch.div(ret.int() + div - 1, div, rounding_mode='trunc') * div
ret = torch.unique(ret)
return list(map(int, ret))
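# e.g. rounded_linspace(0, 512, 5, 128) -> [0, 128, 256, 384, 512]: each value is rounded
# up to a multiple of `div` and duplicates are dropped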
# Square benchmarks
nt = {False: "n", True: "t"}
square_confs = [
triton.testing.Benchmark(
x_names=["M", "N", "K"],
x_vals=rounded_linspace(512, 8192, 32, 128),
line_arg="provider",
line_vals=["cublas", "triton", "cutlass"],
line_names=["cuBLAS", "Triton", "CUTLASS"],
ylabel="TFLOPS",
plot_name=f"matmul-square-{nt[AT]}{nt[BT]}",
args={"AT": AT, "BT": BT, "dtype": torch.float16},
) for AT in [False] for BT in [False]
]
# Transformer training benchmarks
transformer_confs = [
triton.testing.Benchmark(
x_names=[x],
x_vals=rounded_linspace(NK // 16, NK, 32, 128),
line_arg="provider",
line_vals=["cublas", "triton", "cutlass"],
line_names=["cuBLAS", "Triton", "CUTLASS"],
ylabel="TFLOPS",
plot_name=f"matmul-M{M}-{'NK'.replace(x, '')}{NK}",
args={"M": M, 'NK'.replace(x, ''): NK, "AT": False, "BT": False, "dtype": torch.float16}
) for NK in [12288]
for i, x in enumerate(["N", "K"])
for M in [2048]
]
@triton.testing.perf_report(square_confs)
def bench_op(M, N, K, AT, BT, dtype, provider, warmup=25, rep=75):
a = torch.rand((K, M) if AT else (M, K), device="cuda", dtype=dtype)
b = torch.rand((N, K) if BT else (K, N), device="cuda", dtype=dtype)
if AT:
a = a.t()
if BT:
b = b.t()
tflops = lambda ms: 2. * M * N * K / ms * 1e-9
if provider == "cublas":
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b), warmup=warmup, rep=rep)
return tflops(ms), tflops(max_ms), tflops(min_ms)
if provider == "triton":
ms, min_ms, max_ms = triton.testing.do_bench(lambda: triton.ops.matmul(a, b), warmup=warmup, rep=rep)
return tflops(ms), tflops(max_ms), tflops(min_ms)
if provider == "cutlass":
cutlass_matmul = triton.testing.cutlass_matmul
try:
ms, min_ms, max_ms = triton.testing.do_bench(lambda: cutlass_matmul(a, b), warmup=warmup, rep=rep)
return tflops(ms), tflops(max_ms), tflops(min_ms)
except Exception:
return None
return None
| triton-master | python/bench/bench_matmul.py |
import pytest
import torch
import triton
import triton._C.libtriton.triton as _triton
@pytest.mark.parametrize("M, N, dtype, mode",
[
(M, N, dtype, mode) for M in [1024, 821]
for N in [512, 857, 1871, 2089, 8573, 31000]
for dtype in ['bfloat16', 'float16', 'float32']
for mode in ['forward', 'backward']
]
)
def test_op(M, N, dtype, mode):
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 80 and dtype == "bfloat16":
pytest.skip("Only test bfloat16 on devices with sm >= 80")
dtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16, 'float32': torch.float32}[dtype]
# create inputs
x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True)
idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda')
# forward pass
tt_y = triton.ops.cross_entropy(x, idx)
th_y = torch.nn.CrossEntropyLoss(reduction="none")(x, idx)
if mode == 'forward':
triton.testing.assert_almost_equal(th_y, tt_y)
# backward pass
elif mode == 'backward':
dy = torch.randn_like(tt_y)
# triton backward
tt_y.backward(dy)
tt_dx = x.grad.clone()
# torch backward
x.grad.zero_()
th_y.backward(dy)
th_dx = x.grad.clone()
triton.testing.assert_almost_equal(th_dx, tt_dx)
| triton-master | python/test/unit/operators/test_cross_entropy.py |
import pytest
import torch
import triton
@pytest.mark.parametrize("MODE", ["sdd", "dds", "dsd"])
@pytest.mark.parametrize("TRANS_A", [False, True])
@pytest.mark.parametrize("TRANS_B", [False, True])
@pytest.mark.parametrize("BLOCK", [16, 32, 64])
@pytest.mark.parametrize("DTYPE", [torch.float16])
def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K=256):
seed = 0
torch.manual_seed(seed)
is_sdd = MODE == "sdd"
is_dsd = MODE == "dsd"
is_dds = MODE == "dds"
do_sparsify = lambda x: triton.testing.sparsify_tensor(x, layout, BLOCK)
do_mask = lambda x: triton.testing.mask_tensor(x, layout, BLOCK)
# create inputs
# create op
a_shape = (Z, H, K, M) if TRANS_A else (Z, H, M, K)
b_shape = (Z, H, N, K) if TRANS_B else (Z, H, K, N)
c_shape = (Z, H, M, N)
shape = {
"sdd": (M, N),
"dsd": (a_shape[2], a_shape[3]),
"dds": (b_shape[2], b_shape[3]),
}[MODE]
layout = torch.randint(2, (H, shape[0] // BLOCK, shape[1] // BLOCK))
layout[1, 2, :] = 0
layout[1, :, 1] = 0
# create data
a_ref, a_tri = triton.testing.make_pair(a_shape, alpha=.1)
b_ref, b_tri = triton.testing.make_pair(b_shape, alpha=.1)
dc_ref, dc_tri = triton.testing.make_pair(c_shape)
# compute [torch]
dc_ref = do_mask(dc_ref) if is_sdd else dc_ref
a_ref = do_mask(a_ref) if is_dsd else a_ref
b_ref = do_mask(b_ref) if is_dds else b_ref
a_ref.retain_grad()
b_ref.retain_grad()
c_ref = torch.matmul(a_ref.transpose(2, 3) if TRANS_A else a_ref,
b_ref.transpose(2, 3) if TRANS_B else b_ref)
c_ref.backward(dc_ref)
c_ref = do_sparsify(c_ref) if is_sdd else c_ref
da_ref = do_sparsify(a_ref.grad) if is_dsd else a_ref.grad
db_ref = do_sparsify(b_ref.grad) if is_dds else b_ref.grad
# triton result
dc_tri = do_sparsify(dc_tri) if is_sdd else dc_tri
a_tri = do_sparsify(a_tri) if is_dsd else a_tri
b_tri = do_sparsify(b_tri) if is_dds else b_tri
a_tri.retain_grad()
b_tri.retain_grad()
op = triton.ops.blocksparse.matmul(layout, BLOCK, MODE, trans_a=TRANS_A, trans_b=TRANS_B, device="cuda")
c_tri = triton.testing.catch_oor(lambda: op(a_tri, b_tri), pytest)
triton.testing.catch_oor(lambda: c_tri.backward(dc_tri), pytest)
da_tri = a_tri.grad
db_tri = b_tri.grad
# compare
triton.testing.assert_almost_equal(c_ref, c_tri)
triton.testing.assert_almost_equal(da_ref, da_tri)
triton.testing.assert_almost_equal(db_ref, db_tri)
configs = [
(16, 256),
(32, 576),
(64, 1871),
(128, 2511),
]
@pytest.mark.parametrize("is_dense", [False, True])
@pytest.mark.parametrize("BLOCK, WIDTH", configs)
def test_softmax(BLOCK, WIDTH, is_dense, Z=2, H=2, is_causal=True, scale=0.4):
# set seed
torch.random.manual_seed(0)
Z, H, M, N = 2, 3, WIDTH, WIDTH
# initialize layout
# make sure each row has at least one non-zero element
layout = torch.randint(2, (H, M // BLOCK, N // BLOCK))
if is_dense:
layout[:] = 1
else:
layout[1, 2, :] = 0
layout[1, :, 1] = 0
# initialize data
a_shape = (Z, H, M, N)
a_ref, a_tri = triton.testing.make_pair(a_shape)
dout_ref, dout_tri = triton.testing.make_pair(a_shape)
# compute [torch]
a_ref = triton.testing.mask_tensor(a_ref, layout, BLOCK, value=float("-inf"))
a_ref.retain_grad()
at_mask = torch.ones((M, N), device="cuda")
if is_causal:
at_mask = torch.tril(at_mask)
M = at_mask[None, None, :, :] + torch.zeros_like(a_ref)
a_ref[M == 0] = float("-inf")
out_ref = torch.softmax(a_ref * scale, -1)
out_ref.backward(dout_ref)
out_ref = triton.testing.sparsify_tensor(out_ref, layout, BLOCK)
da_ref = triton.testing.sparsify_tensor(a_ref.grad, layout, BLOCK)
# compute [triton]
a_tri = triton.testing.sparsify_tensor(a_tri, layout, BLOCK)
a_tri.retain_grad()
dout_tri = triton.testing.sparsify_tensor(dout_tri, layout, BLOCK)
op = triton.ops.blocksparse.softmax(layout, BLOCK, device="cuda", is_dense=is_dense)
out_tri = op(a_tri, scale=scale, is_causal=is_causal)
out_tri.backward(dout_tri)
da_tri = a_tri.grad
# compare
triton.testing.assert_almost_equal(out_tri, out_ref)
triton.testing.assert_almost_equal(da_tri, da_ref)
@pytest.mark.parametrize("block", [16, 32, 64])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_attention_fwd_bwd(
block,
dtype,
input_scale=1.0,
scale=1 / 8.0,
n_ctx=256,
batch_size=2,
n_heads=2,
):
# inputs
qkv_shape = (batch_size, n_heads, n_ctx, 64)
qkvs = [
torch.nn.Parameter(input_scale * torch.randn(qkv_shape), requires_grad=True).to(dtype).cuda() for _ in range(3)
]
# Triton:
n_blocks = n_ctx // block
layout = torch.tril(torch.ones([n_heads, n_blocks, n_blocks], dtype=torch.long))
query, key, value = [x.clone() for x in qkvs]
query.retain_grad()
key.retain_grad()
value.retain_grad()
attn_out = triton_attention(layout, block, query=query, key=key, value=value, scale=scale)
# ad hoc loss
loss = (attn_out ** 2).mean()
loss.backward()
grads = [query.grad, key.grad, value.grad]
# Torch version:
torch_q, torch_k, torch_v = [x.clone() for x in qkvs]
attn_mask = torch.ones([n_ctx, n_ctx], device="cuda", dtype=dtype)
attn_mask = torch.tril(attn_mask, diagonal=0)
attn_mask = 1e6 * (-1 + (attn_mask.reshape((1, 1, n_ctx, n_ctx)).cuda()))
torch_q.retain_grad()
torch_k.retain_grad()
torch_v.retain_grad()
scores = scale * torch.einsum("bhsd,bhtd->bhst", torch_q, torch_k)
scores = scores + attn_mask
probs = torch.softmax(scores, dim=-1)
torch_attn_out = torch.einsum("bhst,bhtd->bhsd", probs, torch_v)
# ad hoc loss
torch_loss = (torch_attn_out ** 2).mean()
torch_loss.backward()
torch_grads = [torch_q.grad, torch_k.grad, torch_v.grad]
# comparison
# print(f"Triton loss {loss} and torch loss {torch_loss}. Also checking grads...")
triton.testing.assert_almost_equal(loss, torch_loss)
for g1, g2 in zip(grads, torch_grads):
triton.testing.assert_almost_equal(g1, g2)
@pytest.mark.parametrize("block", [16, 32, 64])
def triton_attention(
layout,
block: int,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
scale: float,
):
sparse_dot_sdd_nt = triton.ops.blocksparse.matmul(layout, block, "sdd", trans_a=False, trans_b=True, device=value.device)
sparse_dot_dsd_nn = triton.ops.blocksparse.matmul(layout, block, "dsd", trans_a=False, trans_b=False, device=value.device)
sparse_softmax = triton.ops.blocksparse.softmax(layout, block, device=value.device)
w = sparse_dot_sdd_nt(query, key)
w = sparse_softmax(w, scale=scale, is_causal=True)
a = sparse_dot_dsd_nn(w, value)
return a
| triton-master | python/test/unit/operators/test_blocksparse.py |
import itertools
import pytest
import torch
import triton
import triton._C.libtriton.triton as _triton
@pytest.mark.parametrize(
"BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, NWARP, NSTAGE, M, N, K, AT, BT, DTYPE",
itertools.chain(
*[
[
# 1 warp
(16, 16, 16, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(32, 16, 16, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(16, 32, 16, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(16, 16, 32, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(32, 16, 32, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(16, 32, 32, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(16, 16, 64, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(64, 16, 64, 1, 1, 2, None, None, None, AT, BT, DTYPE),
(16, 64, 64, 1, 1, 2, None, None, None, AT, BT, DTYPE),
# 2 warp
(64, 32, 64, 1, 2, 2, None, None, None, AT, BT, DTYPE),
(32, 64, 64, 1, 2, 2, None, None, None, AT, BT, DTYPE),
(64, 32, 16, 1, 2, 2, None, None, None, AT, BT, DTYPE),
(32, 64, 16, 1, 2, 2, None, None, None, AT, BT, DTYPE),
(128, 32, 32, 1, 2, 2, None, None, None, AT, BT, DTYPE),
(32, 128, 32, 1, 2, 2, None, None, None, AT, BT, DTYPE),
# 4 warp
(128, 64, 16, 1, 4, 2, None, None, None, AT, BT, DTYPE),
(64, 128, 16, 1, 4, 2, None, None, None, AT, BT, DTYPE),
(128, 32, 32, 1, 4, 2, None, None, None, AT, BT, DTYPE),
(32, 128, 32, 1, 4, 2, None, None, None, AT, BT, DTYPE),
(128, 32, 64, 1, 4, 2, None, None, None, AT, BT, DTYPE),
(32, 128, 64, 1, 4, 2, None, None, None, AT, BT, DTYPE),
# 8 warp
(128, 256, 16, 1, 8, 2, None, None, None, AT, BT, DTYPE),
(256, 128, 16, 1, 8, 2, None, None, None, AT, BT, DTYPE),
(256, 128, 32, 1, 8, 2, None, None, None, AT, BT, DTYPE),
# split-k
(64, 64, 16, 2, 4, 2, None, None, None, AT, BT, DTYPE),
(64, 64, 16, 4, 4, 2, None, None, None, AT, BT, DTYPE),
(64, 64, 16, 8, 4, 2, None, None, None, AT, BT, DTYPE),
# variable input
(128, 128, 32, 1, 4, 2, 1024, 1024, 1024, AT, BT, DTYPE),
(128, 128, 32, 1, 4, 2, 384, 128, 640, AT, BT, DTYPE),
(128, 128, 32, 1, 4, 2, 107, 233, 256, AT, BT, DTYPE),
(128, 128, 32, 1, 4, 2, 107, 233, 311, AT, BT, DTYPE),
] for DTYPE in ["float16", "bfloat16", "float32"] for AT in [False, True] for BT in [False, True]
],
# n-stage
*[
[
(16, 16, 16, 1, 1, STAGES, 1024, 1024, 1024, AT, BT, DTYPE),
(64, 32, 64, 1, 2, STAGES, 1024, 1024, 1024, AT, BT, DTYPE),
(128, 64, 16, 1, 4, STAGES, 1024, 1024, 1024, AT, BT, DTYPE),
(256, 128, 32, 1, 8, STAGES, 1024, 1024, 1024, AT, BT, DTYPE),
(128, 128, 32, 1, 4, STAGES, 384, 128, 640, AT, BT, DTYPE),
# split-k
(64, 64, 16, 8, 4, STAGES, 1024, 1024, 1024, AT, BT, DTYPE),
(64, 64, 16, 8, 4, STAGES, 1024, 1024, 32, AT, BT, DTYPE),
] for DTYPE in ["float16", "bfloat16", "float32"] for AT in [False, True] for BT in [False, True] for STAGES in [2, 3, 4]
]
),
)
def test_op(BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, NWARP, NSTAGE, M, N, K, AT, BT, DTYPE):
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 80 and DTYPE == "bfloat16":
pytest.skip("Only test bfloat16 on devices with sm >= 80")
if DTYPE == "bfloat16" and SPLIT_K != 1:
pytest.skip("bfloat16 matmuls don't allow split_k for now")
torch.manual_seed(0)
# nuke kernel decorators -- will set meta-parameters manually
kwargs = {'BLOCK_M': BLOCK_M, 'BLOCK_N': BLOCK_N, 'BLOCK_K': BLOCK_K, 'SPLIT_K': SPLIT_K}
pre_hook = None if SPLIT_K == 1 else lambda nargs: nargs['C'].zero_()
configs = [triton.Config(kwargs=kwargs, num_warps=NWARP, num_stages=NSTAGE, pre_hook=pre_hook)]
kernel = triton.ops._matmul.kernel
decorators = kernel.kernel_decorators
kernel.kernel_decorators = []
triton.autotune(configs, [])(kernel)
kernel.kernel_decorators += decorators[1:]
# get matrix shape
M = BLOCK_M if M is None else M
N = BLOCK_N if N is None else N
K = BLOCK_K * SPLIT_K if K is None else K
# allocate/transpose inputs
DTYPE = {"float16": torch.float16, "bfloat16": torch.bfloat16, "float32": torch.float32}[DTYPE]
a = .1 * torch.randn((K, M) if AT else (M, K), device="cuda", dtype=DTYPE)
b = .1 * torch.randn((N, K) if BT else (K, N), device="cuda", dtype=DTYPE)
a = a.t() if AT else a
b = b.t() if BT else b
# run test
th_c = torch.matmul(a, b)
tt_c = triton.testing.catch_oor(lambda: triton.ops.matmul(a, b), pytest)
triton.testing.assert_almost_equal(th_c, tt_c)
| triton-master | python/test/unit/operators/test_matmul.py |
import os
import re
import shutil
import pytest
import torch
import triton
import triton.language as tl
from triton.code_gen import JITFunction
tmpdir = ".tmp"
@triton.jit
def function_1(i):
i = i + 1
i = function_2(i)
return i
@triton.jit
def function_2(i):
i = i + 1
return i
@triton.jit
def kernel(X, i, BLOCK: tl.constexpr):
i = i + 1
i = function_1(i)
tl.store(X, i)
@triton.jit(do_not_specialize=["i"])
def kernel_nospec(X, i, BLOCK: tl.constexpr):
i = i + 1
i = function_1(i)
tl.store(X, i)
def apply_src_change(target, old, new):
kernel.hash = None
function_1.hash = None
function_2.hash = None
function_1.src = function_1.src.replace(old, new)
target.src = target.src.replace(old, new)
ret = target.cache_key
target.src = target.src.replace(new, old)
return ret
def test_nochange():
baseline = kernel.cache_key
updated = apply_src_change(kernel, 'i + 1', 'i + 1')
assert baseline == updated
def test_toplevel_change():
baseline = kernel.cache_key
updated = apply_src_change(kernel, 'i + 1', 'i + 2')
assert baseline != updated
def test_nested1_change():
baseline = kernel.cache_key
updated = apply_src_change(function_1, 'i + 1', 'i + 2')
assert baseline != updated
def reset_tmp_dir():
os.environ["TRITON_CACHE_DIR"] = tmpdir
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
def test_reuse():
counter = 0
def inc_counter(*args, **kwargs):
nonlocal counter
counter += 1
JITFunction.cache_hook = inc_counter
reset_tmp_dir()
x = torch.empty(1, dtype=torch.int32, device='cuda')
for i in range(10):
kernel[(1,)](x, 1, BLOCK=1024)
assert counter == 1
@pytest.mark.parametrize('mode', ['enable', 'disable'])
def test_specialize(mode):
counter = 0
def inc_counter(*args, **kwargs):
nonlocal counter
counter += 1
JITFunction.cache_hook = inc_counter
reset_tmp_dir()
x = torch.empty(1, dtype=torch.int32, device='cuda')
function = {'enable': kernel, 'disable': kernel_nospec}[mode]
target = {'enable': 5, 'disable': 1}[mode]
for i in [1, 2, 4, 8, 16, 32]:
function[(1,)](x, i, BLOCK=512)
assert counter == target
@pytest.mark.parametrize("value, value_type", [
(-1, 'int32'), (0, 'int32'), (1, None), (-2**31, 'int32'), (2**31 - 1, 'int32'),
(2**32, 'int64'), (2**63 - 1, 'int64'), (-2**63, 'int64'),
(2**31, 'uint32'), (2**32 - 1, 'uint32'), (2**63, 'uint64'), (2**64 - 1, 'uint64')
])
def test_value_specialization(value: int, value_type: str, device='cuda') -> None:
@triton.jit
def kernel(VALUE, X):
pass
cache_str = None
def get_cache_str(*args, **kwargs):
nonlocal cache_str
cache_str = kwargs['key'].split('-')
triton.code_gen.JITFunction.cache_hook = get_cache_str
reset_tmp_dir()
x = torch.tensor([3.14159], device='cuda')
kernel[(1, )](value, x)
triton.code_gen.JITFunction.cache_hook = None
cache_str_match = re.match(r'_(\w+)\[multipleof\(\d+\)]_float32\*\[multipleof\(16\)\]', cache_str[-1])
spec_type = None if cache_str_match is None else cache_str_match.group(1)
assert spec_type == value_type
def test_constexpr_not_callable() -> None:
@triton.jit
def kernel(X, c: tl.constexpr):
tl.store(X, 2)
x = torch.empty(1, dtype=torch.int32, device='cuda')
error = False
try:
kernel[(1, )](x, c="str")
except BaseException:
error = True
assert error is False
# try and catch
try:
kernel[(1, )](x, c=tl.abs)
except BaseException:
error = True
assert error is True
| triton-master | python/test/unit/runtime/test_cache.py |
# flake8: noqa: F821,F841
import itertools
import re
from typing import Optional, Union
import numpy as np
import pytest
import torch
from numpy.random import RandomState
import triton
import triton._C.libtriton.triton as _triton
import triton.language as tl
from triton.code_gen import JITFunction, TensorWrapper, reinterpret
int_dtypes = ['int8', 'int16', 'int32', 'int64']
uint_dtypes = ['uint8', 'uint16', 'uint32', 'uint64']
float_dtypes = ['float16', 'float32', 'float64']
dtypes = int_dtypes + uint_dtypes + float_dtypes
dtypes_with_bfloat16 = dtypes + ['bfloat16']
def _bitwidth(dtype: str) -> int:
# ex.: "int64" -> 64
return int(re.search(r'(\d+)$', dtype).group(1))
def numpy_random(shape, dtype_str, rs: Optional[RandomState] = None, low=None, high=None):
"""
Override `rs` if you're calling this function twice and don't want the same
result for both calls.
"""
if isinstance(shape, int):
shape = (shape, )
if rs is None:
rs = RandomState(seed=17)
if dtype_str in int_dtypes + uint_dtypes:
iinfo = np.iinfo(getattr(np, dtype_str))
low = iinfo.min if low is None else max(low, iinfo.min)
high = iinfo.max if high is None else min(high, iinfo.max)
dtype = getattr(np, dtype_str)
x = rs.randint(low, high, shape, dtype=dtype)
x[x == 0] = 1 # Hack. Never return zero so tests of division don't error out.
return x
elif dtype_str in float_dtypes:
return rs.normal(0, 1, shape).astype(dtype_str)
elif dtype_str == 'bfloat16':
return (rs.normal(0, 1, shape).astype('float32').view('uint32')
& np.uint32(0xffff0000)).view('float32')
elif dtype_str in ['bool', 'int1', 'bool_']:
return rs.normal(0, 1, shape) > 0.0
else:
raise RuntimeError(f'Unknown dtype {dtype_str}')
def to_triton(x: np.ndarray, device='cuda', dst_type=None) -> Union[TensorWrapper, torch.Tensor]:
'''
    Note: We need dst_type because the type of x can be different from dst_type.
For example: x is of type `float32`, dst_type is `bfloat16`.
If dst_type is None, we infer dst_type from x.
'''
t = x.dtype.name
if t in uint_dtypes:
signed_type_name = t.lstrip('u') # e.g. "uint16" -> "int16"
x_signed = x.astype(getattr(np, signed_type_name))
return reinterpret(torch.tensor(x_signed, device=device), getattr(tl, t))
else:
if t == 'float32' and dst_type == 'bfloat16':
return torch.tensor(x, device=device).bfloat16()
return torch.tensor(x, device=device)
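# Note: torch has no unsigned 16/32/64-bit dtypes, so unsigned numpy arrays are copied into
# the matching signed torch tensor and reinterpret()-ed as the corresponding tl uint dtype.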
def torch_dtype_name(dtype) -> str:
if isinstance(dtype, triton.language.dtype):
return dtype.name
elif isinstance(dtype, torch.dtype):
# 'torch.int64' -> 'int64'
m = re.match(r'^torch\.(\w+)$', str(dtype))
return m.group(1)
else:
raise TypeError(f'not a triton or torch dtype: {type(dtype)}')
def to_numpy(x):
if isinstance(x, TensorWrapper):
return x.base.cpu().numpy().astype(getattr(np, torch_dtype_name(x.dtype)))
elif isinstance(x, torch.Tensor):
if x.dtype is torch.bfloat16:
return x.cpu().float().numpy()
return x.cpu().numpy()
else:
raise ValueError(f"Not a triton-compatible tensor: {x}")
def patch_kernel(template, to_replace):
kernel = triton.JITFunction(template.fn)
for key, value in to_replace.items():
kernel.src = kernel.src.replace(key, value)
return kernel
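# Illustrative example: a kernel whose source contains the placeholder token GENERATE_TEST_HERE
# can be specialized with patch_kernel(kernel, {'GENERATE_TEST_HERE': 'x + 1'}), which re-wraps
# the function in a fresh JITFunction and rewrites its source before compilation.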
def check_type_supported(dtype):
'''
skip test if dtype is not supported on the current device
'''
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 80 and (dtype is tl.bfloat16 or dtype == "bfloat16" or dtype is torch.bfloat16):
pytest.skip("bfloat16 is only supported on NVGPU with cc >= 80")
@pytest.mark.parametrize("dtype_x", [dtype_x for dtype_x in dtypes] + ["bfloat16"])
def test_empty_kernel(dtype_x, device='cuda'):
SIZE = 128
@triton.jit
def kernel(X, SIZE: tl.constexpr):
pass
check_type_supported(dtype_x)
x = to_triton(numpy_random(SIZE, dtype_str=dtype_x), device=device, dst_type=dtype_x)
kernel[(1, )](x, SIZE=SIZE, num_warps=4)
# generic test functions
def _test_unary(dtype_x, expr, numpy_expr=None, device='cuda'):
check_type_supported(dtype_x) # early return if dtype_x is not supported
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
x = numpy_random(SIZE, dtype_str=dtype_x)
if 'log' in expr:
x = np.abs(x) + 0.01
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
# triton result
x_tri = to_triton(x, device=device, dst_type=dtype_x)
z_tri = to_triton(np.empty_like(z_ref), device=device, dst_type=dtype_x)
kernel[(1, )](z_tri, x_tri, SIZE=SIZE, num_warps=4)
# compare
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
def _binary_op_dtype_override(a: str, b: str) -> Optional[np.dtype]:
"""
Given two dtype strings, returns the numpy dtype Triton thinks binary
operations on the two types should return. Returns None if the return value
matches numpy. This is generally needed because Triton and pytorch return
narrower floating point types than numpy in mixed operations, and because
Triton follows C/C++ semantics around mixed signed/unsigned operations, and
numpy/pytorch do not.
"""
overrides = {
('float16', 'int16'): np.float16,
('float16', 'int32'): np.float16,
('float16', 'int64'): np.float16,
('float16', 'uint16'): np.float16,
('float16', 'uint32'): np.float16,
('float16', 'uint64'): np.float16,
('int8', 'uint8'): np.uint8,
('int8', 'uint16'): np.uint16,
('int8', 'uint32'): np.uint32,
('int8', 'uint64'): np.uint64,
('int16', 'uint16'): np.uint16,
('int16', 'uint32'): np.uint32,
('int16', 'uint64'): np.uint64,
('int32', 'uint32'): np.uint32,
('int32', 'uint64'): np.uint64,
('int64', 'uint64'): np.uint64,
}
key = (a, b) if a < b else (b, a)
return overrides.get(key)
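# Illustrative examples of the rules described above:
#   _binary_op_dtype_override('int8', 'uint16')   -> np.uint16  (C-style promotion to unsigned)
#   _binary_op_dtype_override('float16', 'int32') -> np.float16 (keeps the narrower float)
#   _binary_op_dtype_override('float32', 'int32') -> None       (no override; the numpy result dtype is used)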
def _test_binary(dtype_x, dtype_y, expr, numpy_expr=None, mode_x='real', mode_y='real', device='cuda', y_low=None, y_high=None):
check_type_supported(dtype_x) # early return if dtype_x is not supported
check_type_supported(dtype_y)
SIZE = 128
# define the kernel / launch-grid
@triton.jit
def kernel(Z, X, Y, SIZE: tl.constexpr):
off = tl.arange(0, SIZE)
x = tl.load(X + off)
y = tl.load(Y + off)
z = GENERATE_TEST_HERE
tl.store(Z + off, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': expr})
# inputs
rs = RandomState(17)
x = numpy_random(SIZE, dtype_str=dtype_x, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype_y, rs=rs, low=y_low, high=y_high)
if mode_x == 'nan':
x[:] = float('nan')
if mode_y == 'nan':
y[:] = float('nan')
# reference result
z_ref = eval(expr if numpy_expr is None else numpy_expr)
dtype_z = _binary_op_dtype_override(dtype_x, dtype_y)
if dtype_z is not None:
z_ref = z_ref.astype(dtype_z)
# triton result
x_tri = to_triton(x, device=device, dst_type=dtype_x)
y_tri = to_triton(y, device=device, dst_type=dtype_y)
z_tri = to_triton(np.empty(SIZE, dtype=z_ref.dtype), device=device)
kernel[(1, )](z_tri, x_tri, y_tri, SIZE=SIZE, num_warps=4)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), err_msg=expr, rtol=0.01)
def _mod_operation_ill_conditioned(dtype_x, dtype_y) -> bool:
# The result of x % y is ill-conditioned if x % y is much smaller than x.
# pytorch/CUDA has slightly different (probably better) rounding on
# remainders than stock LLVM. We currently don't expect to match it
# bit-for-bit.
return (dtype_x, dtype_y) in [
('int32', 'bfloat16'),
('int32', 'float16'),
('int32', 'float32'),
('int64', 'bfloat16'),
('int64', 'float16'),
('int64', 'float32'),
('int64', 'float64'),
('uint16', 'bfloat16'),
('uint16', 'float16'),
('uint16', 'float32'),
('uint32', 'bfloat16'),
('uint32', 'float16'),
('uint32', 'float32'),
('uint64', 'bfloat16'),
('uint64', 'float16'),
('uint64', 'float32'),
('uint64', 'float64'),
]
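# e.g. ('int32', 'float16') is in this list: the remainder is computed in reduced precision and can
# be far smaller than x, so small rounding differences between LLVM and pytorch/CUDA become large
# relative errors; test_bin_op below therefore expects the comparison to fail for these pairs.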
# ---------------
# test binary ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['+', '-', '*', '/', '%']
for dtype_x in dtypes_with_bfloat16
for dtype_y in dtypes_with_bfloat16
])
def test_bin_op(dtype_x, dtype_y, op, device='cuda'):
expr = f' x {op} y'
if op == '%' and dtype_x in int_dtypes + uint_dtypes and dtype_y in int_dtypes + uint_dtypes:
# LLVM has 'numpy.fmod', not 'numpy.remainder', semantics on integer remainders.
numpy_expr = 'np.fmod(x, y)'
elif op in ('/', '%') and dtype_x in ('int16', 'float16', 'bfloat16') and dtype_y in ('int16', 'float16', 'bfloat16'):
# Triton promotes 16-bit floating-point / and % to 32-bit because there
# are no native div or FRem operations on float16. Since we have to
# convert anyway, we may as well take the accuracy bump.
numpy_expr = f'x.astype(np.float32) {op} y.astype(np.float32)'
elif (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if op == '%' and _mod_operation_ill_conditioned(dtype_x, dtype_y):
with pytest.raises(AssertionError, match='Not equal to tolerance'):
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
elif (op in ('%', '/') and
((dtype_x in int_dtypes and dtype_y in uint_dtypes) or
(dtype_x in uint_dtypes and dtype_y in int_dtypes))):
with pytest.raises(triton.code_gen.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
assert re.match('Cannot use .* because they have different signedness', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y",
[(dtype_x, dtype_y) for dtype_x in int_dtypes for dtype_y in int_dtypes] +
[(dtype_x, dtype_y) for dtype_x in uint_dtypes for dtype_y in uint_dtypes]
)
def test_floordiv(dtype_x, dtype_y, device='cuda'):
# Triton has IEEE, not numpy/torch, semantics for %, and those carry
# through to //, so we have to use a nonstandard expression to get a
# reference result for //.
expr = 'x // y'
numpy_expr = '((x - np.fmod(x, y)) / y)'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
# ---------------
# test bitwise ops
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['&', '|', '^']
for dtype_x in dtypes + dtypes_with_bfloat16
for dtype_y in dtypes + dtypes_with_bfloat16
])
def test_bitwise_op(dtype_x, dtype_y, op, device='cuda'):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
if 'float' in dtype_x + dtype_y:
with pytest.raises(triton.code_gen.CompilationError) as exc_info:
_test_binary(dtype_x, dtype_y, expr, numpy_expr='np.array([])', device=device)
# The CompilationError must have been caused by a C++ exception with this text.
assert re.match('invalid operands of type', str(exc_info.value.__cause__))
else:
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device)
@pytest.mark.parametrize("dtype_x, dtype_y, op", [
(dtype_x, dtype_y, op)
for op in ['<<', '>>']
for dtype_x in int_dtypes + uint_dtypes
for dtype_y in int_dtypes + uint_dtypes
])
def test_shift_op(dtype_x, dtype_y, op, device='cuda'):
expr = f'x {op} y'
bw = max(_bitwidth(dtype_x), _bitwidth(dtype_y))
dtype_z = f'uint{bw}'
numpy_expr = f'x.astype(np.{dtype_z}) {op} y.astype(np.{dtype_z})'
_test_binary(dtype_x, dtype_y, expr, numpy_expr, device=device, y_low=0, y_high=65)
# ---------------
# test compare ops
# ---------------
ops = ['==', '!=', '>', '<', '>=', '<=']
@pytest.mark.parametrize("dtype_x, dtype_y, op, mode_x, mode_y",
# real
[
(dtype_x, dtype_y, op, 'real', 'real')
for op in ops
for dtype_x in dtypes
for dtype_y in dtypes
] +
# NaNs
[('float32', 'float32', op, mode_x, mode_y)
for op in ops
for mode_x, mode_y in [('nan', 'real'),
('real', 'nan'),
('nan', 'nan')]
])
def test_compare_op(dtype_x, dtype_y, op, mode_x, mode_y, device='cuda'):
expr = f'x {op} y'
if (dtype_x in uint_dtypes and dtype_y in int_dtypes and _bitwidth(dtype_x) >= _bitwidth(dtype_y)):
numpy_expr = f'x.astype(np.{dtype_x}) {op} y.astype(np.{dtype_x})'
elif (dtype_y in uint_dtypes and dtype_x in int_dtypes and _bitwidth(dtype_y) >= _bitwidth(dtype_x)):
numpy_expr = f'x.astype(np.{dtype_y}) {op} y.astype(np.{dtype_y})'
else:
numpy_expr = None
_test_binary(dtype_x, dtype_y, expr, numpy_expr, mode_x=mode_x, mode_y=mode_y, device=device)
# ---------------
# test where
# ---------------
@pytest.mark.parametrize("dtype", dtypes_with_bfloat16 + ["*int32"])
def test_where(dtype):
select_ptrs = False
if dtype == "*int32":
dtype = "int64"
select_ptrs = True
check_type_supported(dtype)
@triton.jit
def where_kernel(cond_ptr, a_ptr, b_ptr, output_ptr, n_elements,
BLOCK_SIZE: tl.constexpr,
TEST_POINTERS: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
decide = tl.load(cond_ptr + offsets, mask=mask)
if TEST_POINTERS:
a = tl.load(a_ptr + offsets, mask=mask).to(tl.pi32_t)
b = tl.load(b_ptr + offsets, mask=mask).to(tl.pi32_t)
else:
a = tl.load(a_ptr + offsets, mask=mask)
b = tl.load(b_ptr + offsets, mask=mask)
output = tl.where(decide, a, b)
tl.store(output_ptr + offsets, output, mask=mask)
SIZE = 1_000
rs = RandomState(17)
cond = numpy_random(SIZE, 'bool', rs)
x = numpy_random(SIZE, dtype_str=dtype, rs=rs)
y = numpy_random(SIZE, dtype_str=dtype, rs=rs)
z = np.where(cond, x, y)
cond_tri = to_triton(cond, device='cuda')
x_tri = to_triton(x, device='cuda', dst_type=dtype)
y_tri = to_triton(y, device='cuda', dst_type=dtype)
z_tri = to_triton(np.empty(SIZE, dtype=z.dtype), device='cuda', dst_type=dtype)
grid = lambda meta: (triton.cdiv(SIZE, meta['BLOCK_SIZE']),)
where_kernel[grid](cond_tri, x_tri, y_tri, z_tri, SIZE, BLOCK_SIZE=1024, TEST_POINTERS=select_ptrs)
assert (z == to_numpy(z_tri)).all()
# ---------------
# test unary ops
# ---------------
@pytest.mark.parametrize("dtype_x, expr", [
(dtype_x, ' -x') for dtype_x in dtypes_with_bfloat16
] + [
(dtype_x, ' ~x') for dtype_x in int_dtypes
])
def test_unary_op(dtype_x, expr, device='cuda'):
_test_unary(dtype_x, expr, device=device)
# ----------------
# test math ops
# ----------------
# @pytest.mark.parametrize("expr", [
# 'exp', 'log', 'cos', 'sin'
# ])
@pytest.mark.parametrize("expr", [
'exp', 'log', 'cos', 'sin'
])
def test_math_op(expr, device='cuda'):
_test_unary('float32', f'tl.{expr}(x)', f'np.{expr}(x) ', device=device)
# ----------------
# test indexing
# ----------------
def make_ptr_str(name, shape):
rank = len(shape)
offsets = []
stride = 1
for i in reversed(range(rank)):
idx = ', '.join([':' if ii == i else 'None' for ii in range(rank)])
offsets += [f'tl.arange(0, {shape[i]})[{idx}]*{stride}']
stride *= shape[i]
return f"{name} + {' + '.join(offsets)}"
@pytest.mark.parametrize("expr, dtype_str", [
(f'x[{s}]', d)
for s in ['None, :', ':, None', 'None, :, :', ':, :, None']
for d in ['int32', 'uint32', 'uint16']
])
def test_index1d(expr, dtype_str, device='cuda'):
rank_x = expr.count(':')
rank_y = expr.count(',') + 1
shape_x = [32 for _ in range(rank_x)]
shape_z = [32 for _ in range(rank_y)]
shape_z_rank_mismatch = [32 for _ in range(rank_y + 1)]
shape_z_dim_mismatch = [64 for _ in range(rank_y)]
# Triton kernel
@triton.jit
def kernel(Z, X, SIZE: tl.constexpr):
m = tl.arange(0, SIZE)
n = tl.arange(0, SIZE)
x = tl.load(X_PTR_EXPR)
z = GENERATE_TEST_HERE
tl.store(Z_PTR_EXPR, z)
def generate_kernel(shape_x, shape_z):
to_replace = {
'X_PTR_EXPR': make_ptr_str('X', shape_x),
'Z_PTR_EXPR': make_ptr_str('Z', shape_z),
'GENERATE_TEST_HERE': expr,
}
return patch_kernel(kernel, to_replace)
kernel_match = generate_kernel(shape_x, shape_z)
kernel_dim_mismatch = generate_kernel(shape_x, shape_z_dim_mismatch)
kernel_rank_mismatch = generate_kernel(shape_x, shape_z_rank_mismatch)
# torch result
x = numpy_random(shape_x, dtype_str=dtype_str)
y = np.zeros(shape_z, dtype=getattr(np, dtype_str))
z_ref = eval(expr) + y
# triton result
z_tri = to_triton(np.empty_like(z_ref), device=device)
x_tri = to_triton(x)
kernel_match[(1, )](z_tri, x_tri, num_warps=1, SIZE=shape_x[0])
# compare
assert (z_ref == to_numpy(z_tri)).all()
def catch_compilation_error(kernel):
try:
kernel[(1, )](z_tri, x_tri, num_warps=1, SIZE=shape_x[0])
except triton.code_gen.CompilationError as e:
np.testing.assert_(True)
except BaseException:
np.testing.assert_(False)
catch_compilation_error(kernel_dim_mismatch)
catch_compilation_error(kernel_rank_mismatch)
# ---------------
# test tuples
# ---------------
@triton.jit
def fn(a, b):
return a + b, \
a - b, \
a * b
def test_tuples():
device = 'cuda'
@triton.jit
def with_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = fn(x, y)
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
@triton.jit
def without_fn(X, Y, A, B, C):
x = tl.load(X)
y = tl.load(Y)
a, b, c = x + y, x - y, x * y
tl.store(A, a)
tl.store(B, b)
tl.store(C, c)
x = torch.tensor([1.3], device=device, dtype=torch.float32)
y = torch.tensor([1.9], device=device, dtype=torch.float32)
a_tri = torch.tensor([0], device=device, dtype=torch.float32)
b_tri = torch.tensor([0], device=device, dtype=torch.float32)
c_tri = torch.tensor([0], device=device, dtype=torch.float32)
for kernel in [with_fn, without_fn]:
kernel[(1, )](x, y, a_tri, b_tri, c_tri, num_warps=1)
a_ref, b_ref, c_ref = x + y, x - y, x * y
assert a_tri == a_ref
assert b_tri == b_ref
assert c_tri == c_ref
# ---------------
# test atomics
# ---------------
@pytest.mark.parametrize("op, dtype_x_str, mode", itertools.chain.from_iterable([
[
('add', 'float16', mode),
('add', 'uint32', mode), ('add', 'int32', mode), ('add', 'float32', mode),
('max', 'uint32', mode), ('max', 'int32', mode), ('max', 'float32', mode),
('min', 'uint32', mode), ('min', 'int32', mode), ('min', 'float32', mode),
]
for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']]))
def test_atomic_rmw(op, dtype_x_str, mode, device='cuda'):
n_programs = 5
# triton kernel
@triton.jit
def kernel(X, Z):
pid = tl.program_id(0)
x = tl.load(X + pid)
old = GENERATE_TEST_HERE
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.atomic_{op}(Z, x)'})
numpy_op = {'add': np.sum, 'max': np.max, 'min': np.min}[op]
max_neutral = float('-inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).min
min_neutral = float('inf') if dtype_x_str in float_dtypes else np.iinfo(getattr(np, dtype_x_str)).max
neutral = {'add': 0, 'max': max_neutral, 'min': min_neutral}[op]
# triton result
rs = RandomState(17)
x = numpy_random((n_programs, ), dtype_str=dtype_x_str, rs=rs)
if mode == 'all_neg':
x = -np.abs(x)
if mode == 'all_pos':
x = np.abs(x)
if mode == 'min_neg':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = -np.max(np.abs(x)) - 1
if mode == 'max_pos':
idx = rs.randint(n_programs, size=(1, )).item()
x[idx] = np.max(np.abs(x)) + 1
x_tri = to_triton(x, device=device)
z_tri = to_triton(np.array([neutral], dtype=getattr(np, dtype_x_str)), device=device)
kernel[(n_programs, )](x_tri, z_tri)
# torch result
z_ref = numpy_op(x).astype(getattr(np, dtype_x_str))
# compare
exact = op not in ['add']
if exact:
assert z_ref.item() == to_numpy(z_tri).item()
else:
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
@pytest.mark.parametrize("axis", [0, 1])
def test_tensor_atomic_rmw(axis, device="cuda"):
shape0, shape1 = 8, 8
# triton kernel
@triton.jit
def kernel(Z, X, AXIS: tl.constexpr, SHAPE0: tl.constexpr, SHAPE1: tl.constexpr):
off0 = tl.arange(0, SHAPE0)
off1 = tl.arange(0, SHAPE1)
x = tl.load(X + off0[:, None] * SHAPE1 + off1[None, :])
z = tl.sum(x, axis=AXIS)
tl.atomic_add(Z + off0, z)
rs = RandomState(17)
x = numpy_random((shape0, shape1), dtype_str="float32", rs=rs)
# reference result
z_ref = np.sum(x, axis=axis)
# triton result
x_tri = to_triton(x, device=device)
z_tri = to_triton(np.zeros((shape0,), dtype="float32"), device=device)
kernel[(1,)](z_tri, x_tri, axis, shape0, shape1)
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=1e-4)
def test_atomic_cas():
# 1. make sure that atomic_cas changes the original value (Lock)
@triton.jit
def change_value(Lock):
tl.atomic_cas(Lock, 0, 1)
Lock = torch.zeros((1,), device='cuda', dtype=torch.int32)
change_value[(1,)](Lock)
assert(Lock[0] == 1)
# 2. only one block enters the critical section
@triton.jit
def serialized_add(data, Lock):
ptrs = data + tl.arange(0, 128)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
tl.store(ptrs, tl.load(ptrs) + 1.0)
# release lock
tl.atomic_xchg(Lock, 0)
Lock = torch.zeros((1,), device='cuda', dtype=torch.int32)
data = torch.zeros((128,), device='cuda', dtype=torch.float32)
ref = torch.full((128,), 64.0)
serialized_add[(64,)](data, Lock)
triton.testing.assert_almost_equal(data, ref)
# ---------------
# test cast
# ---------------
@pytest.mark.parametrize("dtype_x, dtype_z, bitcast", [
(dtype_x, dtype_z, False)
for dtype_x in dtypes
for dtype_z in dtypes
] + [
('float32', 'bfloat16', False),
('bfloat16', 'float32', False),
('float32', 'int32', True),
('float32', 'int1', False),
] + [
(f'uint{x}', f'int{x}', True) for x in [8, 16, 32, 64]
] + [
(f'int{x}', f'uint{x}', True) for x in [8, 16, 32, 64]
])
def test_cast(dtype_x, dtype_z, bitcast, device='cuda'):
# This is tricky because numpy doesn't have bfloat, and torch doesn't have uints.
x0 = 43 if dtype_x in int_dtypes else 43.5
if dtype_x in float_dtypes and dtype_z == 'int1':
x0 = 0.5
if dtype_x.startswith('bfloat'):
x_tri = torch.tensor([x0], dtype=getattr(torch, dtype_x), device=device)
else:
x = np.array([x0], dtype=getattr(np, dtype_x))
x_tri = to_triton(x)
# triton kernel
@triton.jit
def kernel(X, Z, BITCAST: tl.constexpr):
x = tl.load(X)
z = x.to(Z.dtype.element_ty, bitcast=BITCAST)
tl.store(Z, z)
dtype_z_np = dtype_z if dtype_z != 'int1' else 'bool_'
# triton result
if dtype_z.startswith('bfloat'):
z_tri = torch.empty((1,), dtype=getattr(torch, dtype_z), device=device)
else:
z_tri = to_triton(np.empty((1, ), dtype=getattr(np, dtype_z_np)), device=device)
kernel[(1, )](x_tri, z_tri, BITCAST=bitcast)
# torch result
if dtype_z.startswith('bfloat') or dtype_x.startswith('bfloat'):
assert bitcast is False
z_ref = x_tri.to(z_tri.dtype)
assert z_tri == z_ref
else:
if bitcast:
z_ref = x.view(getattr(np, dtype_z_np))
else:
z_ref = x.astype(getattr(np, dtype_z_np))
assert to_numpy(z_tri) == z_ref
def test_f8_f16_roundtrip():
"""Tests that converting an f8 to f16 and back to f8 doesn't change its value"""
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
f8_tensor = torch.tensor(range(-128, 128), dtype=torch.int8, device='cuda')
f8 = triton.reinterpret(f8_tensor, tl.float8)
n_elements = f8_tensor.numel()
f16 = torch.empty_like(f8_tensor, dtype=torch.float16)
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](f8, f16, n_elements, BLOCK_SIZE=1024)
f8_output_tensor = torch.empty_like(f16, dtype=torch.int8)
f8_output = triton.reinterpret(f8_output_tensor, tl.float8)
copy_kernel[grid](f16, f8_output, n_elements, BLOCK_SIZE=1024)
assert torch.all(f8_tensor == f8_output_tensor)
def test_f16_to_f8_rounding():
"""Takes all float16s, converts them to float8 and back to float16. Checks that the absolute
error is the minimum over all float8.
    Stated more formally:
    for all f16: |f16 - fromf8(tof8(f16))| == min over all f8 of |f16 - fromf8(f8)|"""
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
# torch.view with a dtype isn't supported in triton's torch yet so use numpy's view
f16_input_np = (
np.array(
range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=np.int16,
)
.view(np.float16)
)
f16_input = torch.tensor(f16_input_np, dtype=torch.float16, device='cuda')
n_elements = f16_input.numel()
f8_output_tensor = torch.empty_like(f16_input, dtype=torch.int8)
f8_output = triton.reinterpret(f8_output_tensor, tl.float8)
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](f16_input, f8_output, n_elements, BLOCK_SIZE=1024)
f16_output = torch.empty_like(f16_input, dtype=torch.float16)
copy_kernel[grid](f8_output, f16_output, n_elements, BLOCK_SIZE=1024)
abs_error = torch.abs(f16_input - f16_output)
all_f8_vals_tensor = torch.tensor(range(2 ** 8), dtype=torch.uint8, device='cuda')
all_f8_vals = triton.reinterpret(all_f8_vals_tensor, tl.float8)
all_f8_vals_in_f16 = torch.empty_like(all_f8_vals_tensor, dtype=torch.float16)
copy_kernel[grid](all_f8_vals, all_f8_vals_in_f16, n_elements=256, BLOCK_SIZE=1024)
all_finite_f8_vals_in_f16 = all_f8_vals_in_f16[
torch.isfinite(all_f8_vals_in_f16)
]
min_error = torch.min(
torch.abs(
f16_input.reshape((-1, 1))
- all_finite_f8_vals_in_f16.reshape((1, -1))
),
dim=1,
)[0]
# 1.9375 is float8 max
mismatch = torch.logical_and(
abs_error != min_error, torch.logical_and(torch.isfinite(f16_input), torch.abs(f16_input) < 1.9375)
)
assert torch.all(
torch.logical_not(mismatch)
), f"f16_input[mismatch]={f16_input[mismatch]} f16_output[mismatch]={f16_output[mismatch]} abs_error[mismatch]={abs_error[mismatch]} min_error[mismatch]={min_error[mismatch]}"
# ---------------
# test reduce
# ---------------
@pytest.mark.parametrize("op, dtype_str, shape",
[(op, dtype, shape)
for op in ['min', 'max', 'argmin', 'argmax', 'sum']
for dtype in dtypes_with_bfloat16
for shape in [32, 64, 128, 512]])
def test_reduce1d(op, dtype_str, shape, device='cuda'):
check_type_supported(dtype_str) # bfloat16 on cc < 80 will not be tested
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
tl.store(Z, GENERATE_TEST_HERE)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{op}(x, axis=0)'})
# input
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random((shape,), dtype_str=dtype_str, rs=rs)
x_tri = to_triton(x, device=device)
numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min,
'argmin': np.argmin, 'argmax': np.argmax}[op]
# numpy result
z_dtype_str = 'int32' if op == 'argmin' or op == 'argmax' else dtype_str
z_tri_dtype_str = z_dtype_str
if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16':
z_dtype_str = 'float32'
z_ref = numpy_op(x).astype(getattr(np, z_dtype_str))
# trunc mantissa for a fair comparison of accuracy
z_ref = (z_ref.view('uint32') & np.uint32(0xffff0000)).view('float32')
z_tri_dtype_str = 'bfloat16'
else:
z_ref = numpy_op(x).astype(getattr(np, z_dtype_str))
# triton result
z_tri = to_triton(numpy_random((1,), dtype_str=z_dtype_str, rs=rs),
device=device, dst_type=z_tri_dtype_str)
kernel[(1,)](x_tri, z_tri, BLOCK=shape)
z_tri = to_numpy(z_tri)
# compare
if op == 'sum':
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01)
else:
if op == 'argmin' or op == 'argmax':
# argmin and argmax can have multiple valid indices.
# so instead we compare the values pointed by indices
np.testing.assert_equal(x[z_ref], x[z_tri])
else:
np.testing.assert_equal(z_ref, z_tri)
reduce_configs1 = [
(op, dtype, (1, 1024), axis) for dtype in dtypes_with_bfloat16
for op in ['min', 'max', 'argmin', 'argmax', 'sum']
for axis in [1]
]
reduce_configs2 = [
(op, 'float32', shape, axis)
for op in ['min', 'max', 'argmin', 'argmax', 'sum']
for shape in [(2, 32), (4, 32), (4, 128), (32, 64), (64, 128), (128, 256), (32, 1024)]
for axis in [0, 1]
]
@pytest.mark.parametrize("op, dtype_str, shape, axis", reduce_configs1 + reduce_configs2)
def test_reduce2d(op, dtype_str, shape, axis, device='cuda'):
# triton kernel
@triton.jit
def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexpr):
range_m = tl.arange(0, BLOCK_M)
range_n = tl.arange(0, BLOCK_N)
x = tl.load(X + range_m[:, None] * BLOCK_N + range_n[None, :])
z = GENERATE_TEST_HERE
if AXIS == 1:
tl.store(Z + range_m, z)
else:
tl.store(Z + range_n, z)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f'tl.{op}(x, axis=AXIS)'})
# input
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random(shape, dtype_str=dtype_str, rs=rs)
x_tri = to_triton(x)
numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min,
'argmin': np.argmin, 'argmax': np.argmax}[op]
z_dtype_str = 'int32' if op == 'argmin' or op == 'argmax' else dtype_str
z_tri_dtype_str = z_dtype_str
# numpy result
if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16':
z_dtype_str = 'float32'
z_tri_dtype_str = 'bfloat16'
z_ref = numpy_op(x, axis=axis).astype(getattr(np, z_dtype_str))
# truncate the mantissa for a fair comparison of accuracy
z_ref = (z_ref.view('uint32') & np.uint32(0xffff0000)).view('float32')
else:
z_ref = numpy_op(x, axis=axis).astype(getattr(np, z_dtype_str))
# triton result
z_tri = to_triton(numpy_random((shape[1 - axis],), dtype_str=z_dtype_str, rs=rs),
device=device, dst_type=z_tri_dtype_str)
kernel[(1,)](x_tri, z_tri, BLOCK_M=shape[0], BLOCK_N=shape[1], AXIS=axis)
z_tri = to_numpy(z_tri)
# compare
if op == 'sum':
np.testing.assert_allclose(z_ref, z_tri, rtol=0.01)
else:
if op == 'argmin' or op == 'argmax':
# argmin and argmax can have multiple valid indices.
# so instead we compare the values pointed to by the indices
z_ref_index = np.expand_dims(z_ref, axis=axis)
z_tri_index = np.expand_dims(z_tri, axis=axis)
z_ref_value = np.take_along_axis(x, z_ref_index, axis=axis)
z_tri_value = np.take_along_axis(x, z_tri_index, axis=axis)
np.testing.assert_equal(z_ref_value, z_tri_value)
else:
np.testing.assert_equal(z_ref, z_tri)
# ---------------
# test permute
# ---------------
@pytest.mark.parametrize("dtype_str, shape, perm",
[(dtype, shape, perm)
for dtype in ['bfloat16', 'float16', 'float32']
for shape in [(64, 64), (128, 128)]
for perm in [(1, 0)]])
def test_permute(dtype_str, shape, perm, device='cuda'):
check_type_supported(dtype_str) # bfloat16 on cc < 80 will not be tested
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xn,
Z, stride_zm, stride_zn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * stride_xn
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
tl.store(Zs, tl.load(Xs))
# input
x = numpy_random(shape, dtype_str=dtype_str)
# triton result
z_tri = to_triton(np.empty_like(x), device=device, dst_type=dtype_str)
z_tri_contiguous = to_triton(np.empty_like(x), device=device, dst_type=dtype_str)
x_tri = to_triton(x, device=device, dst_type=dtype_str)
pgm = kernel[(1, 1)](x_tri, x_tri.stride(0), x_tri.stride(1),
z_tri, z_tri.stride(1), z_tri.stride(0),
BLOCK_M=shape[0], BLOCK_N=shape[1])
pgm_contiguous = kernel[(1, 1)](x_tri, x_tri.stride(1), x_tri.stride(0),
z_tri_contiguous, z_tri_contiguous.stride(0), z_tri_contiguous.stride(1),
BLOCK_M=shape[0], BLOCK_N=shape[1])
# numpy result
z_ref = x.transpose(*perm)
# compare
triton.testing.assert_almost_equal(z_tri, z_ref)
triton.testing.assert_almost_equal(z_tri_contiguous, z_ref)
# parse ptx to make sure ld/st are vectorized
ptx = pgm.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
ptx = pgm_contiguous.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
# ---------------
# test dot
# ---------------
@pytest.mark.parametrize("epilogue, allow_tf32, dtype",
[(epilogue, allow_tf32, dtype)
for epilogue in ['none', 'trans', 'add-matrix', 'add-rows', 'add-cols', 'softmax', 'chain-dot']
for allow_tf32 in [True, False]
for dtype in ['float16']
if not (allow_tf32 and (dtype in ['float16']))])
def test_dot(epilogue, allow_tf32, dtype, device='cuda'):
cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device())
if cc < 80:
if dtype == 'int8':
pytest.skip("Only test int8 on devices with sm >= 80")
elif dtype == 'float32' and allow_tf32:
pytest.skip("Only test tf32 on devices with sm >= 80")
M, N, K = 128, 128, 64
num_warps = 8
trans_a, trans_b = False, False
# triton kernel
@triton.jit
def kernel(X, stride_xm, stride_xk,
Y, stride_yk, stride_yn,
W, stride_wn, stride_wl,
Z, stride_zm, stride_zn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
ADD_MATRIX: tl.constexpr, ADD_ROWS: tl.constexpr, ADD_COLS: tl.constexpr,
ALLOW_TF32: tl.constexpr,
DO_SOFTMAX: tl.constexpr, CHAIN_DOT: tl.constexpr,
TRANS_A: tl.constexpr, TRANS_B: tl.constexpr):
off_m = tl.arange(0, BLOCK_M)
off_n = tl.arange(0, BLOCK_N)
off_l = tl.arange(0, BLOCK_N)
off_k = tl.arange(0, BLOCK_K)
Xs = X + off_m[:, None] * stride_xm + off_k[None, :] * stride_xk
Ys = Y + off_k[:, None] * stride_yk + off_n[None, :] * stride_yn
Ws = W + off_n[:, None] * stride_wn + off_l[None, :] * stride_wl
Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn
z = tl.dot(tl.load(Xs), tl.load(Ys), trans_a=TRANS_A, trans_b=TRANS_B, allow_tf32=ALLOW_TF32)
if ADD_MATRIX:
z += tl.load(Zs)
if ADD_ROWS:
ZRs = Z + off_m * stride_zm
z += tl.load(ZRs)[:, None]
if ADD_COLS:
ZCs = Z + off_n * stride_zn
z += tl.load(ZCs)[None, :]
if DO_SOFTMAX:
max = tl.max(z, 1)
z = z - max[:, None]
num = tl.exp(z)
den = tl.sum(num, 1)
z = num / den[:, None]
if CHAIN_DOT:
# tl.store(Zs, z)
# tl.debug_barrier()
z = tl.dot(z.to(tl.float16), tl.load(Ws), trans_a=TRANS_A)
tl.store(Zs, z)
# input
rs = RandomState(17)
x = numpy_random((K, M) if trans_a else (M, K), dtype_str=dtype, rs=rs) * .1
y = numpy_random((N, K) if trans_b else (K, N), dtype_str=dtype, rs=rs) * .1
w = numpy_random((N, N), dtype_str=dtype, rs=rs) * .1
if allow_tf32:
x = (x.view('uint32') & np.uint32(0xffffe000)).view('float32')
y = (y.view('uint32') & np.uint32(0xffffe000)).view('float32')
w = (w.view('uint32') & np.uint32(0xffffe000)).view('float32')
x_tri = to_triton(x, device=device)
y_tri = to_triton(y, device=device)
w_tri = to_triton(w, device=device)
# triton result
z = 1 + numpy_random((M, N), dtype_str=dtype, rs=rs) * .1
z_tri = to_triton(z, device=device)
if epilogue == 'trans':
z_tri = torch.as_strided(z_tri, (M, N), z_tri.stride()[::-1])
pgm = kernel[(1, 1)](x_tri, x_tri.stride(0), x_tri.stride(1),
y_tri, y_tri.stride(0), y_tri.stride(1),
w_tri, w_tri.stride(0), w_tri.stride(1),
z_tri, z_tri.stride(0), z_tri.stride(1),
TRANS_A=trans_a, TRANS_B=trans_b,
BLOCK_M=M, BLOCK_K=K, BLOCK_N=N,
ADD_MATRIX=epilogue == 'add-matrix',
ADD_ROWS=epilogue == 'add-rows',
ADD_COLS=epilogue == 'add-cols',
DO_SOFTMAX=epilogue == 'softmax',
CHAIN_DOT=epilogue == 'chain-dot',
ALLOW_TF32=allow_tf32,
num_warps=num_warps)
# torch result
x_ref = x.T if trans_a else x
y_ref = y.T if trans_b else y
z_ref = np.matmul(x_ref, y_ref)
if epilogue == 'add-matrix':
z_ref += z
if epilogue == 'add-rows':
z_ref += z[:, 0][:, None]
if epilogue == 'add-cols':
z_ref += z[0, :][None, :]
if epilogue == 'softmax':
num = np.exp(z_ref - np.max(z_ref, axis=-1, keepdims=True))
denom = np.sum(num, axis=-1, keepdims=True)
z_ref = num / denom
if epilogue == 'chain-dot':
z_ref = np.matmul(z_ref.T if trans_a else z_ref, w)
# compare
# print(z_ref[:,0], z_tri[:,0])
np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01)
# make sure ld/st are vectorized
ptx = pgm.asm['ptx']
assert 'ld.global.v4' in ptx
assert 'st.global.v4' in ptx
if allow_tf32:
assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' in ptx
elif dtype == 'float32':
assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' not in ptx
elif dtype == 'int8':
assert 'mma.sync.aligned.m16n8k32.row.col.satfinite.s32.s8.s8.s32' in ptx
def test_dot_without_load():
@triton.jit
def kernel(out):
pid = tl.program_id(axis=0)
a = tl.zeros((32, 32), tl.float32)
b = tl.zeros((32, 32), tl.float32)
c = tl.zeros((32, 32), tl.float32)
c = tl.dot(a, b)
pout = out + tl.arange(0, 32)[:, None] * 32 + tl.arange(0, 32)[None, :]
tl.store(pout, c)
out = torch.ones((32, 32), dtype=torch.float32, device="cuda")
kernel[(1,)](out)
# ---------------
# test arange
# ---------------
@pytest.mark.parametrize("start", [0, 1, 7, 16])
def test_arange(start, device='cuda'):
BLOCK = 128
z_tri = torch.empty(BLOCK, dtype=torch.int32, device=device)
@triton.jit
def _kernel(z, BLOCK: tl.constexpr,
START: tl.constexpr, END: tl.constexpr):
off = tl.arange(0, BLOCK)
val = tl.arange(START, END)
tl.store(z + off, val)
_kernel[(1,)](z_tri, START=start, END=start + BLOCK, BLOCK=BLOCK)
z_ref = torch.arange(start, BLOCK + start, dtype=torch.int32, device=device)
triton.testing.assert_almost_equal(z_tri, z_ref)
# ---------------
# test load
# ---------------
# 'bfloat16': torch.bfloat16,
# Testing masked loads with an intermediate copy to shared memory.
@pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32])
def test_masked_load_shared_memory(dtype, device='cuda'):
check_type_supported(dtype) # bfloat16 on cc < 80 will not be tested
M = 32
N = 32
K = 16
in1 = torch.rand((M, K), dtype=dtype, device=device)
in2 = torch.rand((K, N), dtype=dtype, device=device)
out = torch.zeros((M, N), dtype=dtype, device=device)
@triton.jit
def _kernel(in1_ptr, in2_ptr, output_ptr,
in_stride, in2_stride, out_stride,
in_numel, in2_numel, out_numel,
M: tl.constexpr, N: tl.constexpr, K: tl.constexpr):
M_offsets = tl.arange(0, M)
N_offsets = tl.arange(0, N)
K_offsets = tl.arange(0, K)
in_offsets = M_offsets[:, None] * in_stride + K_offsets[None, :]
in2_offsets = K_offsets[:, None] * in2_stride + N_offsets[None, :]
# Load inputs.
x = tl.load(in1_ptr + in_offsets, mask=in_offsets < in_numel)
w = tl.load(in2_ptr + in2_offsets, mask=in2_offsets < in2_numel)
# Without a dot product the memory doesn't get promoted to shared.
o = tl.dot(x, w)
# Store output
output_offsets = M_offsets[:, None] * out_stride + N_offsets[None, :]
tl.store(output_ptr + output_offsets, o, mask=output_offsets < in2_numel)
pgm = _kernel[(1,)](in1, in2, out,
in1.stride()[0],
in2.stride()[0],
out.stride()[0],
in1.numel(),
in2.numel(),
out.numel(),
M=M, N=N, K=K)
reference_out = torch.matmul(in1, in2)
assert triton.testing.allclose(out, reference_out)
@pytest.mark.parametrize("cache", ["", ".ca", ".cg"])
def test_load_cache_modifier(cache):
src = torch.empty(128, device='cuda')
dst = torch.empty(128, device='cuda')
@triton.jit
def _kernel(dst, src, CACHE: tl.constexpr):
offsets = tl.arange(0, 128)
x = tl.load(src + offsets, cache_modifier=CACHE)
tl.store(dst + offsets, x)
pgm = _kernel[(1,)](dst, src, CACHE=cache)
ptx = pgm.asm['ptx']
if cache == '':
assert 'ld.global.ca' not in ptx
assert 'ld.global.cg' not in ptx
if cache == '.cg':
assert 'ld.global.cg' in ptx
assert 'ld.global.ca' not in ptx
if cache == '.ca':
assert 'ld.global.ca' in ptx
assert 'ld.global.cg' not in ptx
@pytest.mark.parametrize("N", [8, 10, 11, 1024])
def test_vectorization(N):
src = torch.empty(1024, device='cuda')
dst = torch.empty(1024, device='cuda')
@triton.jit
def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
x = tl.load(src + offsets, mask=offsets < N)
tl.store(dst + offsets, x, mask=offsets < N)
pgm = _kernel[(1,)](dst, src, N=N, BLOCK_SIZE=src.shape[0])
ptx = pgm.asm["ptx"]
if N % 4 == 0:
assert "ld.global.v4.b32" in ptx
elif N % 2 == 0:
assert "ld.global.v2.b32" in ptx
else:
assert "ld.global.b32" in ptx
# triton.testing.assert_almost_equal(dst, src[:N])
# ---------------
# test store
# ---------------
# ---------------
# test if
# ---------------
# ---------------
# test for
# ---------------
# ---------------
# test while
# ---------------
# ---------------
# test default
# ---------------
# TODO: can't be local to test_default
@triton.jit
def _impl(value=10):
return value
def test_default():
value = 5
ret0 = torch.zeros(1, dtype=torch.int32, device='cuda')
ret1 = torch.zeros(1, dtype=torch.int32, device='cuda')
@triton.jit
def _kernel(ret0, ret1, value):
tl.store(ret0, _impl())
tl.store(ret1, _impl(value))
_kernel[(1,)](ret0, ret1, value)
assert ret0.item() == 10
assert ret1.item() == value
# ---------------
# test noop
# ----------------
def test_noop(device='cuda'):
@triton.jit
def kernel(x):
pass
x = to_triton(numpy_random((1,), dtype_str='int32'), device=device)
kernel[(1, )](x)
@pytest.mark.parametrize("value, value_type", [
(-1, 'i32'), (0, 'i32'), (-2**31, 'i32'), (2**31 - 1, 'i32'),
(2**31, 'u32'), (2**32 - 1, 'u32'), (2**32, 'i64'), (2**63 - 1, 'i64'),
(-2**63, 'i64'), (2**63, 'u64'), (2**64 - 1, 'u64')
])
def test_value_specialization(value: int, value_type: str, device='cuda') -> None:
spec_type = None
def cache_hook(*args, **kwargs):
nonlocal spec_type
spec_type = kwargs["compile"]["arg_types"][0][1]
JITFunction.cache_hook = cache_hook
@triton.jit
def kernel(VALUE, X):
pass
x = torch.tensor([3.14159], device='cuda')
pgm = kernel[(1, )](value, x)
JITFunction.cache_hook = None
assert spec_type == value_type
@pytest.mark.parametrize(
"value, overflow",
[(2**64 - 1, False), (2**64, True), (-2**63, False), (-2**63 - 1, True)]
)
def test_value_specialization_overflow(value: int, overflow: bool, device='cuda') -> None:
@triton.jit
def kernel(VALUE, X):
pass
x = torch.tensor([3.14159], device='cuda')
if overflow:
with pytest.raises(RuntimeError, match='integer overflow'):
kernel[(1, )](value, x)
else:
kernel[(1, )](value, x)
# ----------------
# test constexpr
# ----------------
@pytest.mark.parametrize("op", ['+', '-', '*', '/', '%', '<', '>'])
@pytest.mark.parametrize("is_lhs_constexpr", [False, True])
@pytest.mark.parametrize("is_rhs_constexpr", [True, False])
def test_bin_op_constexpr(op, is_lhs_constexpr, is_rhs_constexpr):
@triton.jit
def kernel(Z, X, Y):
x = tl.load(X)
y = tl.load(Y)
z = GENERATE_TEST_HERE
tl.store(Z, z)
x_str = "3.14" if is_lhs_constexpr else "x"
y_str = "4.13" if is_rhs_constexpr else "y"
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': f"{x_str} {op} {y_str}"})
x = numpy_random((1,), dtype_str="float32")
y = numpy_random((1,), dtype_str="float32")
z = np.array(eval(f"{x_str} {op} {y_str}"))
x_tri = to_triton(x)
y_tri = to_triton(y)
z_tri = to_triton(np.empty((1,), dtype=z.dtype))
kernel[(1,)](z_tri, x_tri, y_tri)
np.testing.assert_allclose(z, to_numpy(z_tri))
def test_constexpr_shape():
@triton.jit
def kernel(X):
off = tl.arange(0, 128 + 128)
tl.store(X + off, off)
x_tri = to_triton(np.empty((256, ), dtype=np.int32))
kernel[(1,)](x_tri)
np.testing.assert_equal(to_numpy(x_tri), np.arange(0, 256))
def test_constexpr_scalar_shape():
@triton.jit
def kernel(X, s):
off = tl.arange(0, 256)
val = off % (256 // s)
tl.store(X + off, val)
x_tri = to_triton(np.empty((256, ), dtype=np.int32))
kernel[(1,)](x_tri, 32)
np.testing.assert_equal(to_numpy(x_tri), np.arange(0, 256) % 8)
# -------------
# test if
# -------------
def test_if():
@triton.jit
def kernel(Cond, XTrue, XFalse, Ret):
pid = tl.program_id(0)
cond = tl.load(Cond)
if pid % 2:
tl.store(Ret, tl.load(XTrue))
else:
tl.store(Ret, tl.load(XFalse))
cond = torch.ones(1, dtype=torch.int32, device='cuda')
x_true = torch.tensor([3.14], dtype=torch.float32, device='cuda')
x_false = torch.tensor([1.51], dtype=torch.float32, device='cuda')
ret = torch.empty(1, dtype=torch.float32, device='cuda')
kernel[(1,)](cond, x_true, x_false, ret)
def test_num_warps_pow2():
dst = torch.empty(128, device='cuda')
@triton.jit
def _kernel(dst):
pass
with pytest.raises(AssertionError, match='must be a power of 2'):
_kernel[(1,)](dst=dst, num_warps=3)
_kernel[(1,)](dst=dst, num_warps=1)
_kernel[(1,)](dst=dst, num_warps=2)
_kernel[(1,)](dst=dst, num_warps=4)
# -------------
# test extern
# -------------
@pytest.mark.parametrize("dtype_str, expr, lib_path",
[('int32', 'libdevice.ffs', ''),
('float32', 'libdevice.pow', '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc'),
('float64', 'libdevice.norm4d', '')])
def test_libdevice(dtype_str, expr, lib_path):
@triton.jit
def kernel(X, Y, BLOCK: tl.constexpr):
x = tl.load(X + tl.arange(0, BLOCK))
y = GENERATE_TEST_HERE
tl.store(Y + tl.arange(0, BLOCK), y)
shape = (128, )
rs = RandomState(17)
# limit the range of integers so that the sum does not overflow
x = numpy_random(shape, dtype_str=dtype_str, rs=rs)
if expr == 'libdevice.ffs':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.libdevice.ffs(x)'})
y_ref = np.zeros(shape, dtype=x.dtype)
for i in range(shape[0]):
y_ref[i] = (int(x[i]) & int(-x[i])).bit_length()
elif expr == 'libdevice.pow':
# numpy does not allow negative factors in power, so we use abs()
x = np.abs(x)
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.libdevice.pow(x, x)'})
y_ref = np.power(x, x)
elif expr == 'libdevice.norm4d':
kernel = patch_kernel(kernel, {'GENERATE_TEST_HERE': 'tl.libdevice.norm4d(x, x, x, x)'})
y_ref = np.sqrt(4 * np.power(x, 2))
x_tri = to_triton(x)
# triton result
y_tri = to_triton(numpy_random((shape[0],), dtype_str=dtype_str, rs=rs), device='cuda')
kernel[(1,)](x_tri, y_tri, BLOCK=shape[0], extern_libs={'libdevice': lib_path})
# compare
if expr == 'libdevice.ffs':
np.testing.assert_equal(y_ref, to_numpy(y_tri))
else:
np.testing.assert_allclose(y_ref, to_numpy(y_tri), rtol=0.01)
| triton-master | python/test/unit/language/test_core.py |
import numpy as np
import pytest
import scipy.stats
import torch
import triton
import triton.language as tl
#####################################
# Reference Philox Implementation
#####################################
class PhiloxConfig:
def __init__(self, PHILOX_ROUND_A, PHILOX_ROUND_B, PHILOX_KEY_A, PHILOX_KEY_B, DTYPE):
self.PHILOX_ROUND_A = np.array(PHILOX_ROUND_A, dtype=DTYPE)
self.PHILOX_ROUND_B = np.array(PHILOX_ROUND_B, dtype=DTYPE)
self.PHILOX_KEY_A = np.array(PHILOX_KEY_A, dtype=DTYPE)
self.PHILOX_KEY_B = np.array(PHILOX_KEY_B, dtype=DTYPE)
self.DTYPE = DTYPE
# This (32-bit) configuration is better suited to the GPU
PHILOX_32 = PhiloxConfig(
PHILOX_KEY_A=0x9E3779B9,
PHILOX_KEY_B=0xBB67AE85,
PHILOX_ROUND_A=0xD2511F53,
PHILOX_ROUND_B=0xCD9E8D57,
DTYPE=np.uint32,
)
# This is what numpy implements
PHILOX_64 = PhiloxConfig(
PHILOX_KEY_A=0x9E3779B97F4A7C15,
PHILOX_KEY_B=0xBB67AE8584CAA73B,
PHILOX_ROUND_A=0xD2E7470EE14C6C93,
PHILOX_ROUND_B=0xCA5A826395121157,
DTYPE=np.uint64,
)
class CustomPhilox4x:
def __init__(self, seed, config):
self._config = config
seed = self._into_pieces(seed)
self._key = np.array(seed[:2], dtype=self._dtype)
self._counter = np.array((0, 0) + seed[2:], dtype=self._dtype)
@property
def _dtype(self):
return self._config.DTYPE
def _into_pieces(self, n, pad=4):
res = []
while len(res) < pad:
res.append(np.array(n, dtype=self._dtype))
n >>= (np.dtype(self._dtype).itemsize * 8)
assert n == 0
return tuple(res)
def _multiply_low_high(self, a, b):
low = a * b
high = int(a) * int(b)
high = np.array(high >> (np.dtype(self._dtype).itemsize * 8), dtype=self._dtype)
return low, high
def _single_round(self, counter, key):
lo0, hi0 = self._multiply_low_high(self._config.PHILOX_ROUND_A, counter[0])
lo1, hi1 = self._multiply_low_high(self._config.PHILOX_ROUND_B, counter[2])
ret0 = hi1 ^ counter[1] ^ key[0]
ret1 = lo1
ret2 = hi0 ^ counter[3] ^ key[1]
ret3 = lo0
return np.array([ret0, ret1, ret2, ret3], dtype=self._dtype)
def _raise_key(self, key):
pk = [self._config.PHILOX_KEY_A, self._config.PHILOX_KEY_B]
return key + np.array(pk, dtype=self._dtype)
def random_raw(self):
counter = self._counter
key = self._key
for _ in range(10):
counter = self._single_round(counter, key)
key = self._raise_key(key)
self.advance(1)
return counter
def advance(self, n_steps):
self._counter[0] += n_steps
assert self._counter[0] < 2**32, "FIXME: doesn't work for large offsets"
class CustomPhilox(CustomPhilox4x):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer = []
def random_raw(self):
if len(self.buffer) == 0:
self.buffer = list(super().random_raw())[::-1]
return int(self.buffer.pop())
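# Usage sketch (reference generators only; actual values depend on seed and config):
#   gen4 = CustomPhilox4x(seed=0, config=PHILOX_32)  # .random_raw() -> 4 uint32 per call
#   gen1 = CustomPhilox(seed=0, config=PHILOX_32)    # .random_raw() -> 1 value per call (buffered)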
#####################################
# Unit Tests
#####################################
BLOCK = 1024
# test generation of random uint32
@pytest.mark.parametrize('size, seed',
[(size, seed) for size in ['10', '4,53', '10000']
for seed in [0, 42, 124, 54, 0xffffffff, 0xdeadbeefcafeb0ba]]
)
def test_randint(size, seed, device='cuda'):
size = list(map(int, size.split(',')))
@triton.jit
def kernel(X, N, seed):
offset = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
rand = tl.randint(seed, offset)
tl.store(X + offset, rand, mask=offset < N)
# triton result
x = torch.empty(size, dtype=torch.int32, device=device)
N = x.numel()
grid = (triton.cdiv(N, BLOCK),)
kernel[grid](x, N, seed)
out_tri = x.cpu().numpy().astype(np.uint32).flatten().tolist()
# reference result
gen = CustomPhilox4x(seed, config=PHILOX_32)
out_ref = [gen.random_raw()[0] for _ in out_tri]
assert out_tri == out_ref
# test uniform PRNG
@pytest.mark.parametrize('size, seed',
[(size, seed) for size in [1000000]
for seed in [0, 42, 124, 54]]
)
def test_rand(size, seed, device='cuda'):
@triton.jit
def kernel(X, N, seed):
offset = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
rand = tl.rand(seed, offset)
tl.store(X + offset, rand, mask=offset < N)
# triton result
x = torch.empty(size, dtype=torch.float32, device=device)
N = x.numel()
grid = (triton.cdiv(N, BLOCK),)
kernel[grid](x, N, seed)
assert all((x >= 0) & (x <= 1))
assert scipy.stats.kstest(x.tolist(), 'uniform', args=(0, 1)).statistic < 0.01
# test normal PRNG
@pytest.mark.parametrize('size, seed',
[(size, seed) for size in [1000000]
for seed in [0, 42, 124, 54]]
)
def test_randn(size, seed, device='cuda'):
@triton.jit
def kernel(X, N, seed):
offset = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
rand = tl.randn(seed, offset)
tl.store(X + offset, rand, mask=offset < N)
# triton result
x = torch.empty(size, dtype=torch.float32, device=device)
N = x.numel()
grid = (triton.cdiv(N, BLOCK),)
kernel[grid](x, N, seed)
assert abs(x.mean()) < 1e-2
assert abs(x.std() - 1) < 1e-2
# tl.rand() should never produce >=1.0
def test_rand_limits():
@triton.jit
def kernel(input, output, n: tl.constexpr):
idx = tl.arange(0, n)
x = tl.load(input + idx)
y = tl.random.uint32_to_uniform_float(x)
tl.store(output + idx, y)
min_max_int32 = torch.tensor([
torch.iinfo(torch.int32).min,
torch.iinfo(torch.int32).max,
], dtype=torch.int32, device='cuda')
output = torch.empty(2, dtype=torch.float32, device='cuda')
kernel[(1,)](min_max_int32, output, 2)
assert output[0] == output[1]
assert 1.0 - torch.finfo(torch.float32).eps <= output[0].item() < 1.0
| triton-master | python/test/unit/language/test_random.py |
import subprocess
import sys
import pytest
import torch
import triton
import triton.language as tl
from triton.testing import get_dram_gbps, get_max_tensorcore_tflops
DEVICE_NAME = 'v100'
#######################
# Utilities
#######################
def nvsmi(attrs):
attrs = ','.join(attrs)
cmd = ['nvidia-smi', '-i', '0', '--query-gpu=' + attrs, '--format=csv,noheader,nounits']
out = subprocess.check_output(cmd)
ret = out.decode(sys.stdout.encoding).split(',')
ret = [int(x) for x in ret]
return ret
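# e.g. nvsmi(['clocks.current.sm', 'clocks.current.memory']) -> [1350, 877]
# (illustrative values only; the numbers depend on the GPU and its current clocks)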
#######################
# Matrix Multiplication
#######################
sm_clocks = {'v100': 1350, 'a100': 1350}
mem_clocks = {'v100': 877, 'a100': 1215}
matmul_data = {
'v100': {
# square
(256, 256, 256): {'float16': 0.027},
(512, 512, 512): {'float16': 0.158},
(1024, 1024, 1024): {'float16': 0.466},
(2048, 2048, 2048): {'float16': 0.695},
(4096, 4096, 4096): {'float16': 0.831},
(8192, 8192, 8192): {'float16': 0.849},
# tall-skinny
(16, 1024, 1024): {'float16': 0.0128},
(16, 4096, 4096): {'float16': 0.0883},
(16, 8192, 8192): {'float16': 0.101},
(64, 1024, 1024): {'float16': 0.073},
(64, 4096, 4096): {'float16': 0.270},
(64, 8192, 8192): {'float16': 0.459},
(1024, 64, 1024): {'float16': 0.0692},
(4096, 64, 4096): {'float16': 0.264},
(8192, 64, 8192): {'float16': 0.452},
},
'a100': {
(256, 256, 256): {'float16': 0.010, 'float32': 0.0214, 'int8': 0.006},
(512, 512, 512): {'float16': 0.061, 'float32': 0.109, 'int8': 0.030},
(1024, 1024, 1024): {'float16': 0.287, 'float32': 0.331, 'int8': 0.169},
(2048, 2048, 2048): {'float16': 0.604, 'float32': 0.599, 'int8': 0.385},
(4096, 4096, 4096): {'float16': 0.842, 'float32': 0.862, 'int8': 0.711},
(8192, 8192, 8192): {'float16': 0.896, 'float32': 0.932, 'int8': 0.860},
# tall-skinny
(16, 1024, 1024): {'float16': 0.0077, 'float32': 0.0127, 'int8': 0.005},
(16, 4096, 4096): {'float16': 0.0363, 'float32': 0.0457, 'int8': 0.0259},
(16, 8192, 8192): {'float16': 0.0564, 'float32': 0.0648, 'int8': 0.0431},
(64, 1024, 1024): {'float16': 0.0271, 'float32': 0.0509, 'int8': 0.0169},
(64, 4096, 4096): {'float16': 0.141, 'float32': 0.162, 'int8': 0.097},
(64, 8192, 8192): {'float16': 0.244, 'float32': 0.257, 'int8': 0.174},
(1024, 64, 1024): {'float16': 0.0263, 'float32': 0.0458, 'int8': 0.017},
(4096, 64, 4096): {'float16': 0.135, 'float32': 0.177, 'int8': 0.102},
(8192, 64, 8192): {'float16': 0.216, 'float32': 0.230, 'int8': 0.177},
}
# # deep reductions
# (64 , 64 , 16384) : {'a100': 0.},
# (64 , 64 , 65536) : {'a100': 0.},
# (256 , 256 , 8192 ) : {'a100': 0.},
# (256 , 256 , 32768) : {'a100': 0.},
}
@pytest.mark.parametrize('M, N, K, dtype_str',
[(M, N, K, dtype_str)
for M, N, K in matmul_data[DEVICE_NAME].keys()
for dtype_str in ['float16']])
def test_matmul(M, N, K, dtype_str):
if dtype_str in ['float32', 'int8'] and DEVICE_NAME != 'a100':
pytest.skip('Only test float32 & int8 on a100')
dtype = {'float16': torch.float16, 'float32': torch.float32, 'int8': torch.int8}[dtype_str]
torch.manual_seed(0)
ref_gpu_util = matmul_data[DEVICE_NAME][(M, N, K)][dtype_str]
cur_sm_clock = nvsmi(['clocks.current.sm'])[0]
ref_sm_clock = sm_clocks[DEVICE_NAME]
max_gpu_perf = get_max_tensorcore_tflops(dtype, clock_rate=cur_sm_clock * 1e3)
assert abs(cur_sm_clock - ref_sm_clock) < 10, f'GPU SMs must run at {ref_sm_clock} MHz'
if dtype == torch.int8:
a = torch.randint(-128, 127, (M, K), dtype=dtype, device='cuda')
b = torch.randint(-128, 127, (N, K), dtype=dtype, device='cuda')
b = b.t() # only test row-col layout
else:
a = torch.randn((M, K), dtype=dtype, device='cuda')
b = torch.randn((K, N), dtype=dtype, device='cuda')
fn = lambda: triton.ops.matmul(a, b)
ms = triton.testing.do_bench(fn, percentiles=None, warmup=25, rep=1000)
cur_gpu_perf = 2. * M * N * K / ms * 1e-9
cur_gpu_util = cur_gpu_perf / max_gpu_perf
triton.testing.assert_almost_equal(cur_gpu_util, ref_gpu_util, decimal=2)
#######################
# Element-Wise
#######################
@triton.jit
def _add(x_ptr, y_ptr, output_ptr, n_elements,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
elementwise_data = {
'v100': {
1024 * 16: 0.0219,
1024 * 64: 0.0791,
1024 * 256: 0.243,
1024 * 1024: 0.534,
1024 * 4096: 0.796,
1024 * 16384: 0.905,
1024 * 65536: 0.939,
},
'a100': {
1024 * 16: 0.008,
1024 * 64: 0.034,
1024 * 256: 0.114,
1024 * 1024: 0.315,
1024 * 4096: 0.580,
1024 * 16384: 0.782,
1024 * 65536: 0.850,
}
}
@pytest.mark.parametrize('N', elementwise_data[DEVICE_NAME].keys())
def test_elementwise(N):
torch.manual_seed(0)
ref_gpu_util = elementwise_data[DEVICE_NAME][N]
cur_mem_clock = nvsmi(['clocks.current.memory'])[0]
ref_mem_clock = mem_clocks[DEVICE_NAME]
max_gpu_perf = get_dram_gbps()
    assert abs(cur_mem_clock - ref_mem_clock) < 10, f'GPU memory must run at {ref_mem_clock} MHz'
z = torch.empty((N, ), dtype=torch.float16, device='cuda')
x = torch.randn_like(z)
y = torch.randn_like(z)
grid = lambda args: (triton.cdiv(N, args['BLOCK_SIZE']), )
fn = lambda: _add[grid](x, y, z, N, BLOCK_SIZE=1024)
ms = triton.testing.do_bench(fn, percentiles=None, warmup=25, rep=250)
cur_gpu_perf = 3. * N * z.element_size() / ms * 1e-6
cur_gpu_util = cur_gpu_perf / max_gpu_perf
triton.testing.assert_almost_equal(cur_gpu_util, ref_gpu_util, decimal=2)
| triton-master | python/test/regression/test_performance.py |
"""isort:skip_file"""
# flake8: noqa: F401
__version__ = '2.0.0'
# TODO: torch needs to be imported first
# or pybind11 shows `munmap_chunk(): invalid pointer`
import torch
# submodules
from .code_gen import cdiv, next_power_of_2, jit, autotune, heuristics, \
JITFunction, Config, Autotuner, reinterpret
from . import language
from . import code_gen
from . import testing
from . import ops
| triton-master | python/triton/__init__.py |
from __future__ import annotations
import ast
import builtins
import functools
import hashlib
import inspect
import os
import pickle
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import warnings
from typing import Dict, Set, Tuple, Union
import torch
from filelock import FileLock
import triton
import triton._C.libtriton.triton as _triton
from .tools.disasm import extract
try:
from torch._C import _cuda_getCurrentRawStream as get_cuda_stream
except ImportError:
get_cuda_stream = lambda dev_idx: torch.cuda.current_stream(dev_idx).cuda_stream
def current_cuda_stream(device_idx=0):
# Torch's torch.cuda.current_stream() is slow. We provide this
# function to give the user an opportunity to monkey-patch their
# own faster current stream lookup.
return get_cuda_stream(device_idx)
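# Example (sketch): a caller could monkey-patch this lookup with their own function, e.g.
#   import triton.code_gen as code_gen
#   code_gen.current_cuda_stream = my_faster_stream_lookup  # hypothetical replacement
# where `my_faster_stream_lookup(device_idx)` returns a raw CUDA stream handle.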
def mangle_ty(ty):
if ty.is_ptr():
return 'P' + mangle_ty(ty.element_ty)
if ty.is_int():
return 'i' + str(ty.int_bitwidth)
if ty.is_fp8():
return 'fp8'
if ty.is_fp16():
return 'fp16'
if ty.is_bf16():
return 'bf16'
if ty.is_fp32():
return 'fp32'
if ty.is_fp64():
return 'fp64'
if ty.is_void():
return 'V'
if ty.is_block():
elt = mangle_ty(ty.scalar)
shape = '_'.join(map(str, ty.shape))
return f'{elt}S{shape}S'
assert False, "Unsupport type"
def mangle_fn(name, arg_tys, constants):
# doesn't mangle ret type, which must be a function of arg tys
mangled_arg_names = '_'.join([mangle_ty(ty) for ty in arg_tys])
key = lambda x: x.__name__ if isinstance(x, JITFunction) else repr(x)
mangled_constants = '_'.join([f'{i}c{key(constants[i])}' for i in sorted(constants)])
mangled_constants = mangled_constants.replace('.', '_d_')
mangled_constants = mangled_constants.replace("'", '_sq_')
mangled_constants = mangled_constants.replace("e-", '_em_')
ret = f'{name}__{mangled_arg_names}__{mangled_constants}'
return ret
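# Example: a function `kernel` taking (pointer-to-fp32, i32) arguments with
# constant {2: 64} mangles to 'kernel__Pfp32_i32__2c64'.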
def is_triton_tensor(value):
return isinstance(value, triton.language.tensor)
class ValueConstructor:
def __init__(self, module, builder, gscope) -> None:
self.gscope = gscope
self.lscope = dict()
self.builder = builder
self.module = module
# [name, bb] => triton.language.tensor
self.lvalues: Dict[Tuple[str, _triton.ir.basic_block], triton.language.tensor] = {}
# bb => {name => phi}
self.incomplete_phis = {}
self.sealed_blocks: Set[_triton.ir.basic_block] = set()
#
self.builtins = {
'range': range,
'min': triton.language.minimum,
'float': float,
'int': int,
'print': print,
'isinstance': isinstance,
'getattr': getattr,
}
def get_value(self, name):
''' This function:
1. makes sure `name` is defined
2. if `name` is a triton.language.tensor, gets the stored tensor by calling
`self._get_tensor()`
'''
# search node.id in local scope
ret = None
if name in self.lscope:
ret = self.lscope[name]
# search node.id in global scope
elif name in self.gscope:
ret = self.gscope[name]
# search node.id in builtins
elif name in self.builtins:
ret = self.builtins[name]
else:
raise ValueError(f'{name} is not defined')
if is_triton_tensor(ret):
return self._get_tensor(name, self.builder.get_insert_block())
return ret
def set_value(self, name: str,
value: Union[triton.language.tensor, triton.language.constexpr]) -> None:
''' This function:
called by visit_Assign() & visit_FunctionDef() to store the left value (lvalue)
1. records the locally defined name (FIXME: should consider control flow)
2. stores the tensor in self.lvalues
'''
self.lscope[name] = value
if isinstance(value, triton.language.tensor):
self._set_value(name, self.builder.get_insert_block(), value)
#
# SSA-construction
#
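    # Values are numbered per basic block. Blocks whose predecessors are not all
    # known yet ("unsealed") get placeholder phis; sealing a block completes its
    # phis and removes trivial ones (on-the-fly SSA construction).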
def _get_tensor(self, name: str, bb: _triton.ir.basic_block) -> triton.language.tensor:
# local value numbering
if (name, bb) in self.lvalues:
return self.lvalues[(name, bb)]
# global value numbering
saved_insert_point = self.builder.get_insert_point()
result = self._get_tensor_recursive(name, bb)
self.builder.set_insert_point(saved_insert_point)
return result
def _get_tensor_recursive(self, name: str, bb: _triton.ir.basic_block) -> triton.language.tensor:
preds = bb.get_predecessors()
type = self.lscope[name].type
# some preds haven't been filled, create a phi as a proxy of the value
if bb not in self.sealed_blocks:
result = self._make_phi(type, len(preds), bb)
if bb in self.incomplete_phis:
self.incomplete_phis[bb][name] = result
else:
self.incomplete_phis[bb] = {name: result}
elif len(preds) == 1:
# one predecessor: no phi needed, try get value from pred
result = self._get_tensor(name, preds[0])
elif len(preds) == 0:
result = self._get_tensor(name, None)
else: # multiple preds
phi = self._make_phi(type, len(preds), bb)
self._set_value(name, bb, phi)
result = self._add_phi_operands(name, phi)
self._set_value(name, bb, result)
return result
    # returns a new phi tensor, which encapsulates an ir.phi_node
def _make_phi(self,
type: triton.language.dtype,
num_values: int,
bb: _triton.ir.basic_block) -> triton.language.tensor:
instr = bb.get_first_non_phi()
self.builder.set_insert_point((bb, instr))
ir_phi = self.builder.create_phi(type.to_ir(self.builder), num_values)
if instr:
self.builder.set_insert_block(bb)
return triton.language.tensor(ir_phi, type)
# complete a phi node. (TODO: rename this as _complete_phis?)
    # Note: since we try to remove trivial phis, the returned tensor might not be a phi
def _add_phi_operands(self, name: str,
phi: triton.language.tensor) -> triton.language.tensor:
bb = phi.handle.get_parent()
for pred in bb.get_predecessors():
v = self._get_tensor(name, pred)
phi.handle.add_incoming(v.handle, pred)
phi = self._try_remove_trivial_phi(phi)
return phi
def _set_value(self, name: str, bb: _triton.ir.basic_block, value: triton.language.tensor) -> None:
self.lvalues[(name, bb)] = value
    # TODO: why do we need this?
self.module.set_instr_metadata(name, value.handle)
def _seal_block(self, bb: _triton.ir.basic_block):
# complete all incomplete phis
if bb in self.incomplete_phis:
for name, phi in self.incomplete_phis[bb].items():
result = self._add_phi_operands(name, phi)
# it's possible that this phi is trivial
if self._get_tensor(name, bb).handle == phi.handle:
self._set_value(name, bb, result)
del self.incomplete_phis[bb]
self.sealed_blocks.add(bb)
def _try_remove_trivial_phi(self, phi: triton.language.tensor) -> triton.language.tensor:
unique_handles = {op for op in phi.handle.ops() if op != phi.handle}
if len(unique_handles) != 1: # non-trivial phi
return phi
v = unique_handles.pop()
phi.handle.replace_all_uses_with(v)
# phi.handle.erase_from_parent()
# TODO: remove trivial phis recursively
return triton.language.tensor(v, phi.type)
class CodeGenerator(ast.NodeVisitor):
def __init__(self, context, prototype, gscope, attributes, constants, prototypes=None, module=None, is_kernel=False):
self.prototypes = dict() if prototypes is None else prototypes
self.builder = _triton.ir.builder(context)
self.module = _triton.ir.module('', self.builder) if module is None else module
self.prototype = prototype
self.attributes = attributes
self.constants = constants
self.last_node = None
self.is_kernel = is_kernel
self.value_constructor = ValueConstructor(self.module, self.builder, gscope)
#
# AST visitor
#
def visit_compound_statement(self, stmts):
for stmt in stmts:
self.last_ret = self.visit(stmt)
if isinstance(stmt, ast.Return):
break
return stmts and isinstance(stmt, ast.Return)
def visit_Module(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_List(self, node):
ctx = self.visit(node.ctx)
assert ctx is None
elts = [self.visit(elt) for elt in node.elts]
return elts
# By design, only non-kernel functions can return
def visit_Return(self, node):
ret = self.visit(node.value)
if ret is None:
return triton.language.tensor(self.builder.ret_void(), triton.language.void)
ret = triton.language.core._to_tensor(ret, self.builder)
ret = triton.language.tensor(self.builder.ret(ret.handle), ret.type)
return ret
def visit_FunctionDef(self, node):
arg_names, kwarg_names = self.visit(node.args)
# initialize defaults
for i, default_value in enumerate(node.args.defaults):
arg_node = node.args.args[-i - 1]
annotation = arg_node.annotation
name = arg_node.arg
st_target = ast.Name(id=name, ctx=ast.Store())
if annotation is None:
init_node = ast.Assign(targets=[st_target], value=default_value)
else:
init_node = ast.AnnAssign(target=st_target, value=default_value, annotation=annotation)
self.visit(init_node)
# initialize function
fn_name = mangle_fn(node.name, self.prototype.param_types, self.constants)
self.prototypes[fn_name] = self.prototype
fn = self.module.get_or_insert_function(fn_name, self.prototype.to_ir(self.builder))
fn.set_is_kernel(self.is_kernel)
arg_values = []
idx = 0
for i, arg_name in enumerate(arg_names):
if i in self.constants:
cst = self.constants[i]
if not isinstance(cst, triton.language.constexpr):
cst = triton.language.constexpr(self.constants[i])
arg_values.append(cst)
else:
if i in self.attributes:
is_ptr = fn.args[idx].type.is_ptr()
attr = 'aligned' if is_ptr else 'multiple_of'
attr = getattr(_triton.ir.attribute_kind, attr)
attr = _triton.ir.attribute(attr, self.attributes[i])
fn.add_attr(idx + 1, attr)
fn.args[idx].name = arg_name
arg_values.append(triton.language.tensor(fn.args[idx], self.prototype.param_types[idx]))
idx += 1
insert_pt = self.builder.get_insert_block()
entry = _triton.ir.basic_block.create(self.builder.context, "entry", fn)
self.builder.set_insert_block(entry)
self.value_constructor._seal_block(entry)
for arg_name, arg_value in zip(arg_names, arg_values):
self.value_constructor.set_value(arg_name, arg_value)
# visit function body
has_ret = self.visit_compound_statement(node.body)
# finalize
if not has_ret:
self.builder.ret_void()
else:
# a bit hacky: we only know the return type at the last moment so we update type info here
self.module.reset_ret_ty(fn_name, self.last_ret.type.to_ir(self.builder))
self.prototype.ret_type = self.last_ret.type
self.builder.set_insert_block(insert_pt)
def visit_arguments(self, node):
arg_names = []
for arg in node.args:
arg_names += [self.visit(arg)]
kwarg_names = self.visit(node.kwarg)
return arg_names, kwarg_names
def visit_arg(self, node):
ast.NodeVisitor.generic_visit(self, node)
return node.arg
def visit_AnnAssign(self, node):
# extract attributes
annotation = self.visit(node.annotation)
target = self.visit(node.target)
value = self.visit(node.value)
# constexpr
if annotation == triton.language.constexpr:
if target in self.value_constructor.lscope:
raise ValueError(f'{target} is already defined.'
f' constexpr cannot be reassigned.')
if not isinstance(value, triton.language.constexpr):
value = triton.language.constexpr(value)
self.value_constructor.lscope[target] = value
return self.value_constructor.lscope[target]
# default: call visit_Assign
return self.visit_Assign(node)
def visit_Assign(self, node):
_names = []
for target in node.targets:
_names += [self.visit(target)]
assert len(_names) == 1
names = _names[0]
values = self.visit(node.value)
if not isinstance(names, tuple):
names = [names]
if not isinstance(values, tuple):
values = [values]
if isinstance(values[0], triton.language.tensor) \
and isinstance(values[0].type, triton.language.tuple_type):
struct = values[0].handle
tys = values[0].type.element_types
values = [self.builder.extract_value(struct, i) for i in range(len(tys))]
values = [triton.language.tensor(v, ty) for v, ty in zip(values, tys)]
assert len(values) == len(names)
for name, value in zip(names, values):
# TODO: can we store constexpr here to support constant folding?
    # by default, constexprs are assigned to python variables
if isinstance(value, triton.language.constexpr):
value = value.value
if value is None:
raise ValueError(f'Cannot assign None to non-constexpr `{name}`. Please annotate as `: tl.constexpr`')
if not isinstance(value, triton.language.tensor):
value = triton.language.core._to_tensor(value, self.builder)
self.value_constructor.set_value(name, value)
def visit_AugAssign(self, node):
name = node.target.id
lhs = ast.Name(id=name, ctx=ast.Load())
rhs = ast.BinOp(lhs, node.op, node.value)
assign = ast.Assign(targets=[node.target], value=rhs)
self.visit(assign)
return self.value_constructor.get_value(name)
def visit_Name(self, node):
if type(node.ctx) == ast.Store:
return node.id
return self.value_constructor.get_value(node.id)
def visit_Store(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_Load(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_Tuple(self, node):
args = [self.visit(x) for x in node.elts]
mode = type(args[0])
# tuple of values -- create a struct
if len(args) > 1 and mode == triton.language.tensor\
and all([type(arg) == mode for arg in args]):
tuple_ty = triton.language.tuple_type([arg.type for arg in args])
ret = _triton.ir.undef.get(tuple_ty.to_ir(self.builder))
for i, arg in enumerate(args):
ret = self.builder.insert_value(ret, arg.handle, i)
ret = triton.language.tensor(ret, tuple_ty)
return ret
return tuple(args)
def visit_BinOp(self, node):
# visit operand
lhs = self.visit(node.left)
rhs = self.visit(node.right)
is_lhs_constexpr = isinstance(lhs, triton.language.constexpr)
is_rhs_constexpr = isinstance(rhs, triton.language.constexpr)
lhs = lhs.value if is_lhs_constexpr else lhs
rhs = rhs.value if is_rhs_constexpr else rhs
# get function name
fn = {
ast.Add: '__add__',
ast.Sub: '__sub__',
ast.Mult: '__mul__',
ast.Div: '__truediv__',
ast.FloorDiv: '__floordiv__',
ast.Mod: '__mod__',
ast.Pow: '__pow__',
ast.LShift: '__lshift__',
ast.RShift: '__rshift__',
ast.BitAnd: '__and__',
ast.BitOr: '__or__',
ast.BitXor: '__xor__',
}[type(node.op)]
# return a new constexpr if both arg are constexprs
if is_lhs_constexpr and is_rhs_constexpr:
return triton.language.constexpr(getattr(lhs, fn)(rhs))
# call operator
if is_triton_tensor(lhs):
return getattr(lhs, fn)(rhs, _builder=self.builder)
elif is_triton_tensor(rhs):
fn = fn[:2] + 'r' + fn[2:]
return getattr(rhs, fn)(lhs, _builder=self.builder)
else:
return getattr(lhs, fn)(rhs)
def visit_If(self, node):
cond = self.visit(node.test)
if isinstance(cond, triton.language.tensor):
cond = cond.to(triton.language.int1, _builder=self.builder)
current_bb = self.builder.get_insert_block()
then_bb = _triton.ir.basic_block.create(self.builder.context, "then", current_bb.parent)
else_bb = _triton.ir.basic_block.create(self.builder.context, "else", current_bb.parent) if node.orelse else None
endif_bb = _triton.ir.basic_block.create(self.builder.context, "endif", current_bb.parent)
self.value_constructor._seal_block(then_bb)
if else_bb:
self.value_constructor._seal_block(else_bb)
self.builder.cond_br(cond.handle, then_bb, else_bb)
else:
self.builder.cond_br(cond.handle, then_bb, endif_bb)
self.builder.set_insert_block(then_bb)
is_terminator = self.visit_compound_statement(node.body)
# TODO: last statement is a terminator?
if not is_terminator:
self.builder.br(endif_bb)
if else_bb:
self.builder.set_insert_block(else_bb)
is_terminator = self.visit_compound_statement(node.orelse)
# TODO: last statement is a terminator?
if not is_terminator:
self.builder.br(endif_bb)
self.value_constructor._seal_block(endif_bb)
self.builder.set_insert_block(endif_bb)
else:
if isinstance(cond, triton.language.constexpr):
cond = cond.value
if cond:
self.visit_compound_statement(node.body)
else:
self.visit_compound_statement(node.orelse)
def visit_IfExp(self, node):
cond = self.visit(node.test)
if cond.value:
return self.visit(node.body)
else:
return self.visit(node.orelse)
def visit_Pass(self, node):
pass
def visit_Compare(self, node):
assert len(node.comparators) == 1
assert len(node.ops) == 1
lhs = self.visit(node.left)
rhs = self.visit(node.comparators[0])
is_lhs_constexpr = isinstance(lhs, triton.language.constexpr)
is_rhs_constexpr = isinstance(rhs, triton.language.constexpr)
lhs = lhs.value if is_lhs_constexpr else lhs
rhs = rhs.value if is_rhs_constexpr else rhs
# handle `is`` and `is not``
if type(node.ops[0]) == ast.Is:
return triton.language.constexpr(lhs is rhs)
if type(node.ops[0]) == ast.IsNot:
return triton.language.constexpr(lhs is not rhs)
# function name
fn = {
ast.Eq: '__eq__',
ast.NotEq: '__ne__',
ast.Lt: '__lt__',
ast.LtE: '__le__',
ast.Gt: '__gt__',
ast.GtE: '__ge__',
}[type(node.ops[0])]
# return a new constexpr if both arg are constexprs
if is_lhs_constexpr and is_rhs_constexpr:
return triton.language.constexpr(getattr(lhs, fn)(rhs))
# call operator
if is_triton_tensor(lhs):
return getattr(lhs, fn)(rhs, _builder=self.builder)
elif is_triton_tensor(rhs):
fn = fn[:2] + 'r' + fn[2:]
return getattr(rhs, fn)(lhs, _builder=self.builder)
else:
assert False
def visit_UnaryOp(self, node):
op = self.visit(node.operand)
if type(node.op) == ast.Not:
assert isinstance(op, triton.language.constexpr), "`not` only supported for constexpr at the moment"
return triton.language.constexpr(not op)
fn = {
ast.USub: '__neg__',
ast.UAdd: '__pos__',
ast.Invert: '__invert__',
}[type(node.op)]
if isinstance(op, triton.language.constexpr):
return triton.language.constexpr(getattr(op.value, fn)())
assert is_triton_tensor(op)
return getattr(op, fn)(_builder=self.builder)
def visit_While(self, node):
current_bb = self.builder.get_insert_block()
loop_bb = _triton.ir.basic_block.create(self.builder.context, "loop", current_bb.parent)
next_bb = _triton.ir.basic_block.create(self.builder.context, "postloop", current_bb.parent)
def continue_fn():
cond = self.visit(node.test)
return self.builder.cond_br(cond.handle, loop_bb, next_bb)
continue_fn()
self.builder.set_insert_block(loop_bb)
self.visit_compound_statement(node.body)
continue_fn()
stop_bb = self.builder.get_insert_block()
self.value_constructor._seal_block(stop_bb)
self.value_constructor._seal_block(loop_bb)
self.value_constructor._seal_block(next_bb)
self.builder.set_insert_block(next_bb)
for stmt in node.orelse:
ast.NodeVisitor.generic_visit(self, stmt)
def visit_Subscript(self, node):
assert node.ctx.__class__.__name__ == "Load"
lhs = self.visit(node.value)
slices = self.visit(node.slice)
if is_triton_tensor(lhs):
return lhs.__getitem__(slices, _builder=self.builder)
return lhs[slices]
def visit_ExtSlice(self, node):
return [self.visit(dim) for dim in node.dims]
def visit_For(self, node):
iterator = self.visit(node.iter.func)
if iterator != self.value_constructor.builtins['range']:
raise RuntimeError('Only `range` iterator currently supported')
# static for loops: all iterator arguments are constexpr
iter_args = [self.visit(arg) for arg in node.iter.args]
is_static = all([isinstance(x, triton.language.constexpr) for x in iter_args])
if is_static:
st_target = ast.Name(id=node.target.id, ctx=ast.Store())
iter_args = [arg.value for arg in iter_args]
range = iterator(*iter_args)
if len(range) <= 10:
for i in iterator(*iter_args):
self.value_constructor.lscope[node.target.id] = triton.language.constexpr(i)
self.visit_compound_statement(node.body)
for stmt in node.orelse:
ast.NodeVisitor.generic_visit(self, stmt)
return
# create nodes
st_target = ast.Name(id=node.target.id, ctx=ast.Store())
ld_target = ast.Name(id=node.target.id, ctx=ast.Load())
arg_0 = node.iter.args[0] if len(node.iter.args) > 1 else ast.Num(0)
arg_1 = node.iter.args[1] if len(node.iter.args) > 1 else node.iter.args[0]
arg_2 = node.iter.args[2] if len(node.iter.args) > 2 else ast.Num(1)
# init node
init_node = ast.Assign(targets=[st_target], value=arg_0)
# step node
pos_cond_node = ast.Compare(ld_target, [ast.Lt()], [arg_1])
neg_cond_node = ast.Compare(ld_target, [ast.Gt()], [arg_1])
pos_step_node = ast.Compare(arg_2, [ast.Gt()], [ast.Num(0)])
build_cond = lambda: triton.language.where(self.visit(pos_step_node),
self.visit(pos_cond_node),
self.visit(neg_cond_node),
_builder=self.builder)
# cond_node = neg_cond_node
step_node = ast.AugAssign(target=st_target, op=ast.Add(), value=arg_2)
# code generation
current_bb = self.builder.get_insert_block()
loop_bb = _triton.ir.basic_block.create(self.builder.context, "loop", current_bb.parent)
next_bb = _triton.ir.basic_block.create(self.builder.context, "postloop", current_bb.parent)
def continue_fn():
self.visit(step_node)
cond = build_cond()
return self.builder.cond_br(cond.handle, loop_bb, next_bb)
# init loop induction variable
self.visit(init_node)
# promote it to right type
init_val = self.value_constructor.get_value(node.target.id)
promote = lambda a, b: triton.language.semantic.computation_type_impl(a, b, False)
start_ty = triton.language.core._to_tensor(iter_args[0], self.builder).type
stop_ty = triton.language.core._to_tensor(iter_args[1], self.builder).type if len(iter_args) > 1 else None
ty = promote(start_ty, stop_ty) if len(iter_args) > 1 else start_ty
casted = triton.language.semantic.cast(init_val, ty, self.builder)
self.value_constructor.set_value(node.target.id, casted)
# create cond
cond = build_cond()
self.builder.cond_br(cond.handle, loop_bb, next_bb)
self.builder.set_insert_block(loop_bb)
self.visit_compound_statement(node.body)
# TODO: handle case where body breaks control flow
continue_fn()
stop_bb = self.builder.get_insert_block()
self.value_constructor._seal_block(stop_bb)
self.value_constructor._seal_block(loop_bb)
self.value_constructor._seal_block(next_bb)
self.builder.set_insert_block(next_bb)
for stmt in node.orelse:
ast.NodeVisitor.generic_visit(self, stmt)
def visit_Slice(self, node):
lower = self.visit(node.lower)
upper = self.visit(node.upper)
step = self.visit(node.step)
return slice(lower, upper, step)
def visit_Index(self, node):
return self.visit(node.value)
def visit_keyword(self, node):
return {node.arg: self.visit(node.value)}
def visit_Call(self, node):
fn = self.visit(node.func)
if isinstance(fn, triton.language.constexpr):
fn = fn.value
kws = dict()
for keyword in node.keywords:
kws.update(self.visit(keyword))
args = [self.visit(arg) for arg in node.args]
if isinstance(fn, JITFunction):
from inspect import getcallargs
args = getcallargs(fn.fn, *args, **kws)
args = [args[name] for name in fn.arg_names]
args = [arg if isinstance(arg, triton.language.tensor)
else triton.language.constexpr(arg) for arg in args]
# generate function def
attributes = dict()
constexprs = [i for i, arg in enumerate(args) if isinstance(arg, triton.language.constexpr)]
constants = {i: args[i] for i in constexprs}
# generate call
args = [None if i in constexprs else arg for i, arg in enumerate(args)]
arg_vals = [arg.handle for arg in args if arg is not None]
arg_types = [arg.type for arg in args if arg is not None]
fn_name = mangle_fn(fn.__name__, arg_types, constants)
# generate function def if necessary
if not self.module.has_function(fn_name):
ret_type = triton.language.void
prototype = triton.language.function_type(ret_type, arg_types)
gscope = sys.modules[fn.fn.__module__].__dict__
generator = CodeGenerator(self.builder.context, prototype, gscope, attributes, constants, prototypes=self.prototypes, module=self.module)
generator.visit(fn.parse())
symbol = self.module.get_function(fn_name)
ret = self.builder.call(symbol, arg_vals)
if not ret.type.is_void():
ret = triton.language.tensor(ret, self.prototypes[fn_name].ret_type)
return ret
# built-in function
if sys.modules[fn.__module__] is triton.language.core or isinstance(fn, triton.language.extern.ExternalFunction):
ret = fn(*args, _builder=self.builder, **kws)
if fn in self.value_constructor.builtins.values():
args = [arg.value if isinstance(arg, triton.language.constexpr) else arg
for arg in args]
ret = fn(*args, **kws)
if isinstance(ret, (bool, int, float)):
ret = triton.language.core.constexpr(ret)
else:
ret = triton.language.core._to_tensor(ret, self.builder)
# special case: dynamic parallelism
# in this case the core primitive returns a proxy
# if isinstance(ret, triton.language.core.LaunchProxy):
# ret_type = _triton.ir.type.get_void(self.builder.context)
# arg_tys = [x.type for x in ret.args]
# prototype = _triton.ir.type.make_function(ret_type, arg_tys)
# gscope = sys.modules[ret.fn.fn.__module__].__dict__
# constants = ret.constants
# fn_name = mangle_fn(ret.fn.__name__, arg_tys, ret.constants)
# # TODO: clean-up attributes handling in function
# if not self.module.has_function(fn_name):
# attributes = {i: list(arg.parent.get_attrs(arg))[0].value for i, arg in enumerate(ret.args) \
# if isinstance(arg, _triton.ir.argument) and arg.parent.has_attr(i + 1) }
# generator = CodeGenerator(self.builder.context, prototype, gscope, attributes, constants, module=self.module, is_kernel=True)
# generator.visit(ret.fn.parse())
# symbol = self.module.get_function(fn_name)
# # TODO: should ret.args not include any constants ?
# ret = self.builder.launch(symbol, ret.args, ret.grid, ret.num_warps)
return ret
# return fn(*args, **kws)
def visit_Constant(self, node):
return triton.language.constexpr(node.value)
if sys.version_info < (3, 8):
def visit_NameConstant(self, node):
return triton.language.constexpr(node.value)
def visit_Num(self, node):
return triton.language.constexpr(node.n)
def visit_Str(self, node):
return triton.language.constexpr(ast.literal_eval(node))
def visit_Attribute(self, node):
lhs = self.visit(node.value)
return getattr(lhs, node.attr)
def visit_Expr(self, node):
ast.NodeVisitor.generic_visit(self, node)
def visit_NoneType(self, node):
return None
def visit(self, node):
if node is not None:
self.last_node = node
with warnings.catch_warnings():
# The ast library added visit_Constant and deprecated some other
# methods but we can't move to that without breaking Python 3.6 and 3.7.
warnings.simplefilter("ignore", DeprecationWarning) # python 3.9
warnings.simplefilter("ignore", PendingDeprecationWarning) # python 3.8
return super().visit(node)
def generic_visit(self, node):
typename = type(node).__name__
raise NotImplementedError("Unsupported node: {}".format(typename))
class Binary:
def __init__(self, backend, name, asm, shared_mem, num_warps):
self.backend = backend
self.name = name
self.asm = asm
self.shared_mem = shared_mem
self.num_warps = num_warps
class LoadedBinary:
def __init__(self, device: int, bin: Binary):
module, kernel, n_regs, n_spills = _triton.code_gen.load_binary(bin.backend,
bin.name,
bin.asm,
bin.shared_mem,
device)
self.bin = bin
self.asm = bin.asm
self.sass = ''
self.module = module
self.kernel = kernel
self.n_regs = n_regs
self.n_spills = n_spills
self.device = device
self.shared_mem = bin.shared_mem
def __call__(self, stream, args, grid_0, grid_1=1, grid_2=1):
_triton.runtime.enqueue(self.bin.backend, stream, self.kernel,
grid_0, grid_1, grid_2,
self.bin.num_warps * 32, 1, 1,
args, self.bin.shared_mem)
def get_sass(self, fun=None):
if self.sass:
return self.sass
fd, path = tempfile.mkstemp()
try:
with open(fd, 'wb') as cubin:
cubin.write(self.asm['cubin'])
self.sass = extract(path, fun)
finally:
os.remove(path)
self.asm['sass'] = self.sass
return self.sass
class CompilationError(Exception):
def __init__(self, src, node):
self.message = f'at {node.lineno}:{node.col_offset}:\n'
self.message += '\n'.join(src.split('\n')[:node.lineno])
self.message += '\n' + ' ' * node.col_offset + '^'
self.src = src
self.node = node
super().__init__(self.message)
def __reduce__(self):
# this is necessary to make CompilationError picklable
return (type(self), (self.src, self.node))
class OutOfResources(Exception):
def __init__(self, required, limit, name):
self.message = f'out of resource: {name}, '\
f'Required: {required}, '\
f'Hardware limit: {limit}'
self.required = required
self.limit = limit
self.name = name
super().__init__(self.message)
def __reduce__(self):
    # this is necessary to make OutOfResources picklable
return (type(self), (self.required, self.limit, self.name))
class Kernel:
@staticmethod
def _type_name(obj):
type_names = {
triton.language.float8: 'f8',
torch.bfloat16: 'bf16',
torch.float16: 'f16',
torch.float32: 'f32',
torch.float64: 'f64',
torch.bool: 'i1',
torch.uint8: 'u8',
torch.int8: 'i8',
torch.int16: 'i16',
torch.int32: 'i32',
torch.int64: 'i64',
triton.language.uint8: 'u8',
triton.language.uint16: 'u16',
triton.language.uint32: 'u32',
triton.language.uint64: 'u64',
}
if hasattr(obj, 'data_ptr'):
return type_names[obj.dtype]
if isinstance(obj, triton.language.constexpr):
obj = obj.value
if isinstance(obj, int):
if -2**31 <= obj < 2**31:
return 'i32'
elif 2**31 <= obj < 2**32:
return 'u32'
elif -2**63 <= obj < 2**63:
return 'i64'
elif 2**63 <= obj < 2**64:
return 'u64'
else:
raise ValueError(f'integer overflow representing {obj}')
if isinstance(obj, float):
return 'f'
if isinstance(obj, bool):
return 'B'
if isinstance(obj, str):
return 'str'
raise NotImplementedError(f'could not compute type name for {obj}')
@staticmethod
def _to_python_ir(obj):
# convert torch.Tensor to Triton IR pointers
if hasattr(obj, 'data_ptr'):
name = Kernel._type_name(obj)
return 'ptr', name
# default path returns a ('scalar', type_name) descriptor
name = Kernel._type_name(obj)
return 'scalar', name
@staticmethod
def _to_triton_ir(obj):
which, name = obj
type_map = {
'I': triton.language.int32,
'L': triton.language.int64,
'f': triton.language.float32,
'B': triton.language.int1,
'f8': triton.language.float8,
'f16': triton.language.float16,
'bf16': triton.language.bfloat16,
'f32': triton.language.float32,
'f64': triton.language.float64,
'i1': triton.language.int1,
'i8': triton.language.int8,
'i16': triton.language.int16,
'i32': triton.language.int32,
'i64': triton.language.int64,
'u8': triton.language.uint8,
'u16': triton.language.uint16,
'u32': triton.language.uint32,
'u64': triton.language.uint64,
}
# convert torch.Tensor to Triton IR pointers
if which == 'ptr':
elt_ty = type_map[name]
return triton.language.pointer_type(elt_ty, 1)
# default path returns triton.ir.type directly
return type_map[name]
@staticmethod
def pow2_divisor(N):
if N % 16 == 0:
return 16
if N % 8 == 0:
return 8
if N % 4 == 0:
return 4
if N % 2 == 0:
return 2
return 1
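# e.g. pow2_divisor(24) == 8, pow2_divisor(20) == 4, pow2_divisor(7) == 1; the result
# (capped at 16) is what add_to_cache below records as the alignment attribute of an argument.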
def __init__(self, fn):
self.fn = fn
self.cache_key = {}
def add_to_cache(self, key, wargs, device_idx, num_warps, num_stages, extern_libs):
tensor_idxs = [i for i, arg in enumerate(wargs) if hasattr(arg, 'data_ptr')]
# attributes
attributes = dict()
for i, arg in enumerate(wargs):
if i in self.fn.do_not_specialize:
continue
if isinstance(arg, int):
attributes[i] = Kernel.pow2_divisor(arg)
elif i in tensor_idxs:
addr = arg.data_ptr()
range_size = _triton.runtime.get_pointer_range_size(addr)
attributes[i] = min(Kernel.pow2_divisor(addr),
Kernel.pow2_divisor(range_size))
# transforms ints whose value is one into constants for just-in-time compilation
constants = {i: arg for i, arg in enumerate(wargs) if isinstance(arg, int) and arg == 1 and i not in self.fn.do_not_specialize}
constants.update({i: arg.value for i, arg in enumerate(wargs) if isinstance(arg, triton.language.constexpr)})
constants.update({i: None for i, arg in enumerate(wargs) if arg is None})
arg_types = [Kernel._to_python_ir(arg) for i, arg in enumerate(wargs) if i not in constants]
return self.fn._warmup(key, arg_types=arg_types, device=device_idx, attributes=attributes, constants=constants, num_warps=num_warps, num_stages=num_stages,
extern_libs=extern_libs, is_manual_warmup=False)
def __call__(self, *wargs, grid, num_warps=4, num_stages=2, extern_libs={}, **kwargs):
assert num_warps != 0 and (num_warps & (num_warps - 1)) == 0, f"num_warps={num_warps} must be a power of 2."
# handle arguments passed by name
kwargs = {self.fn.arg_names.index(name): value for name, value in kwargs.items()}
wargs = list(wargs)
for i, pos in enumerate(sorted(kwargs)):
wargs.insert(pos + i, kwargs[pos])
if len(wargs) != len(self.fn.arg_names):
raise TypeError(f"Function takes {len(self.fn.arg_names)} positional arguments but {len(wargs)} were given")
# handle annotations
for pos, _type in self.fn.annotations.items():
assert _type == triton.language.constexpr, "only constexpr annotations are supported for now"
wargs[pos] = _type(wargs[pos])
# check that tensors are on GPU.
# for arg in wargs:
# if hasattr(arg, 'data_ptr'):
# assert arg.is_cuda, "All tensors must be on GPU!"
# set device (i.e., make sure torch has the context initialized)
device = torch.cuda.current_device()
# torch creates a new thread for the backward pass that may have an uninitialized context
# no way to know if this function should or shouldn't initialize the cuda context
# so we're being conservative here
torch.cuda.set_device(device)
if device not in self.cache_key:
cc = torch.cuda.get_device_capability(device)
cc = str(cc[0]) + '-' + str(cc[1])
self.cache_key[device] = self.fn.cache_key + cc
cache_key = self.cache_key[device]
stream = current_cuda_stream(device)
return _triton.runtime.launch(wargs, self.fn.do_not_specialize, cache_key, self.fn.arg_names,
device, stream, self.fn.bin_cache, num_warps, num_stages, extern_libs, self.add_to_cache,
grid)
class Launcher:
def __init__(self, kernel, grid):
self.kernel = kernel
self.grid = grid
def __call__(self, *wargs, **kwargs):
return self.kernel(*wargs, **kwargs, grid=self.grid)
class Autotuner:
def __init__(self, kernel, arg_names, configs, key, reset_to_zero, prune_configs_by: Dict = None):
'''
:param prune_configs_by: a dict of functions that are used to prune configs, fields:
'perf_model': performance model used to predict the running time with different configs; returns the running time
'top_k': number of configs to bench
'early_config_prune' (optional): a function used to prune configs early (e.g. by num_stages). It takes configs: List[Config] as its input and returns the pruned configs.
'''
if not configs:
self.configs = [Config(dict(), num_warps=4, num_stages=2)]
else:
self.configs = configs
self.key_idx = [arg_names.index(k) for k in key]
self.cache = dict()
self.kernel = kernel
# hook to reset all required tensor to zeros before relaunching a kernel
self.hook = lambda args: 0
if reset_to_zero is not None:
self.reset_idx = [arg_names.index(k) for k in reset_to_zero]
def _hook(args):
for i in self.reset_idx:
args[i].zero_()
self.hook = _hook
self.arg_names = arg_names
# prune configs
if prune_configs_by:
perf_model, top_k = prune_configs_by['perf_model'], prune_configs_by['top_k']
early_config_prune = prune_configs_by.get('early_config_prune', None)
else:
perf_model, top_k, early_config_prune = None, None, None
self.perf_model, self.configs_top_k = perf_model, top_k
self.early_config_prune = early_config_prune
def _bench(self, *args, config, **meta):
# check for conflicts, i.e. meta-parameters both provided
# as kwargs and by the autotuner
conflicts = meta.keys() & config.kwargs.keys()
if conflicts:
raise ValueError(
f"Conflicting meta-parameters: {', '.join(conflicts)}."
" Make sure that you don't re-define auto-tuned symbols."
)
# augment meta-parameters with tunable ones
current = dict(meta, **config.kwargs)
def kernel_call():
if config.pre_hook:
config.pre_hook(self.nargs)
self.hook(args)
self.kernel(*args, num_warps=config.num_warps, num_stages=config.num_stages, **current)
return triton.testing.do_bench(kernel_call)
def __call__(self, *args, **kwargs):
self.nargs = dict(zip(self.arg_names, args))
if len(self.configs) > 1:
key = tuple([args[i] for i in self.key_idx])
if key not in self.cache:
# prune configs
pruned_configs = self.configs
if self.early_config_prune:
pruned_configs = self.early_config_prune(self.configs, self.nargs)
if self.perf_model:
top_k = self.configs_top_k
if isinstance(top_k, float) and top_k <= 1.0:
top_k = int(len(self.configs) * top_k)
if len(pruned_configs) > top_k:
est_timing = {config: self.perf_model(**self.nargs, **kwargs, **config.kwargs, num_stages=config.num_stages, num_warps=config.num_warps) for config in pruned_configs}
pruned_configs = sorted(est_timing.keys(), key=lambda x: est_timing[x])[:top_k]
bench_start = time.time()
timings = {config: self._bench(*args, config=config, **kwargs)
for config in pruned_configs}
bench_end = time.time()
self.bench_time = bench_end - bench_start
self.cache[key] = builtins.min(timings, key=timings.get)
self.hook(args)
self.configs_timings = timings
config = self.cache[key]
else:
config = self.configs[0]
self.best_config = config
if config.pre_hook is not None:
config.pre_hook(self.nargs)
return self.kernel(*args, num_warps=config.num_warps, num_stages=config.num_stages, **kwargs, **config.kwargs)
_version_key_lock = threading.Lock()
_version_key = None
def version_key():
global _version_key
if _version_key is not None:
return _version_key
with _version_key_lock:
if _version_key is not None:
return _version_key
import pkgutil
contents = []
# frontend
with open(triton.code_gen.__file__, "rb") as f:
contents += [hashlib.md5(f.read()).hexdigest()]
# backend
with open(triton._C.libtriton.__file__, "rb") as f:
contents += [hashlib.md5(f.read()).hexdigest()]
# language
language_path = os.path.join(*triton.__path__, 'language')
for lib in pkgutil.iter_modules([language_path]):
with open(lib.module_finder.find_spec(lib.name).origin, "rb") as f:
contents += [hashlib.md5(f.read()).hexdigest()]
# ptxas version
try:
ptxas_version = hashlib.md5(subprocess.check_output(["ptxas", "--version"])).hexdigest()
except Exception:
ptxas_version = ''
_version_key = '-'.join(triton.__version__) + '-' + ptxas_version + '-' + '-'.join(contents)
return _version_key
class DependenciesFinder(ast.NodeVisitor):
def __init__(self, globals, src) -> None:
super().__init__()
self.ret = hashlib.md5(src.encode("utf-8")).hexdigest()
self.globals = globals
def visit_Name(self, node):
return self.globals.get(node.id, None)
def visit_Attribute(self, node):
lhs = self.visit(node.value)
while isinstance(lhs, ast.Attribute):
lhs = self.visit(lhs.value)
if lhs is None or lhs is triton:
return None
return getattr(lhs, node.attr)
def visit_Call(self, node):
func = self.visit(node.func)
if func is None:
return
if inspect.isbuiltin(func):
return
if func.__module__ and func.__module__.startswith('triton.'):
return
assert isinstance(func, triton.JITFunction)
if func.hash is None:
tree = ast.parse(func.src)
finder = DependenciesFinder(func.__globals__, func.src)
finder.visit(tree)
func.hash = finder.ret
self.ret = (self.ret + func.hash).encode("utf-8")
self.ret = hashlib.md5(self.ret).hexdigest()
def default_cache_dir():
return os.path.join(os.environ["HOME"], ".triton", "cache")
class JITFunction:
cache_hook = None
def __init__(self, fn, version=None, inline=True, do_not_specialize=None):
# information of wrapped function
self.fn = fn
self.module = fn.__module__
signature = inspect.signature(fn)
self.arg_names = [v.name for v in signature.parameters.values()]
self.arg_defaults = [v.default for v in signature.parameters.values()]
self.version = version
self.inline = inline
self.src = textwrap.dedent(inspect.getsource(fn))
self.src = self.src[self.src.find("def"):]
self.do_not_specialize = [] if do_not_specialize is None else do_not_specialize
self.do_not_specialize = [self.arg_names.index(arg) if isinstance(arg, str) else arg for arg in self.do_not_specialize]
# cache for callable driver objects (e.g. CUkernel)
self.bin_cache = dict()
self.hash = None
# JITFunction can be instantiated as kernel
# when called with a grid using __getitem__
self.kernel_decorators = []
self.kernel = None
# annotations
self.annotations = {self.arg_names.index(name): ty for name, ty in fn.__annotations__.items()}
self.__annotations__ = fn.__annotations__
# constexprs
self.constexprs = [self.arg_names.index(ann) for ann in self.__annotations__.keys()]
# forward docs
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
self.__globals__ = fn.__globals__
self.__module__ = fn.__module__
@property
@functools.lru_cache()
def cache_key(self):
# TODO : hash should be attribute of `self`
if self.hash is None:
dependencies_finder = DependenciesFinder(globals=self.__globals__, src=self.src)
dependencies_finder.visit(self.parse())
self.hash = dependencies_finder.ret + version_key()
return self.hash
# we do not parse `src` in the constructor because
# the user might want to monkey-patch self.src dynamically.
# Some unit tests do this, for example.
def parse(self):
tree = ast.parse(self.src)
assert isinstance(tree, ast.Module)
assert len(tree.body) == 1
assert isinstance(tree.body[0], ast.FunctionDef)
return tree
def __call__(self, *args, **kwargs):
raise RuntimeError("Cannot call @triton.jit'd outside of the scope of a kernel.")
# - when `.src` attribute is set, cache path needs
# to be reinitialized
# - when kernel decorators change, cached kernel
# needs to be cleared
def __setattr__(self, name, value):
if name == 'kernel_decorators':
self.kernel = None
super(JITFunction, self).__setattr__(name, value)
if name == 'src':
self.hash = None
JITFunction.cache_key.fget.cache_clear()
def _init_kernel(self):
if self.kernel is None:
self.kernel = Kernel(self)
for decorator in reversed(self.kernel_decorators):
self.kernel = decorator(self.kernel)
return self.kernel
def warmup(self, compile):
return self._warmup(**compile, is_manual_warmup=True)
def _warmup(self, key, arg_types, device, attributes, constants, num_warps, num_stages, extern_libs, is_manual_warmup):
hashed_key = hashlib.md5(key.encode("utf-8")).hexdigest()
# create cache directory
cache_dir = os.environ.get('TRITON_CACHE_DIR', default_cache_dir())
if cache_dir:
os.makedirs(cache_dir, exist_ok=True)
if cache_dir:
bin_cache_path = os.path.join(cache_dir, hashed_key)
bin_lock_path = bin_cache_path + ".lock"
else:
bin_cache_path = None
bin_lock_path = None
binary = None
if bin_cache_path and os.path.exists(bin_cache_path):
assert bin_lock_path is not None
with FileLock(bin_lock_path):
with open(bin_cache_path, 'rb') as f:
binary = pickle.load(f)["binary"]
compile = dict(arg_types=arg_types, device=device, attributes=attributes, constants=constants, num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs)
if JITFunction.cache_hook is not None:
name = self.__name__
info = key.split('-')[-3:]
num_warps, num_stages, sig = info[0], info[1], info[2].split('_')[1:]
# make signature human-readable
arg_reprs = []
for arg_name, arg_sig in zip(self.arg_names, sig):
arg_reprs.append(f'{arg_name}: {arg_sig}')
# assemble the repr
arg_reprs = ", ".join(arg_reprs)
repr = f"{name}[num_warps={num_warps}, num_stages={num_stages}]({arg_reprs})"
noop = JITFunction.cache_hook(key=key, repr=repr, fn=self, compile={"key": key, **compile}, is_manual_warmup=is_manual_warmup, already_compiled=binary is not None)
if noop:
return True
if binary is None:
binary = self._compile(**compile)
if bin_cache_path:
assert bin_lock_path is not None
with FileLock(bin_lock_path):
with open(bin_cache_path + ".tmp", "wb") as f:
pickle.dump({"binary": binary, "key": key}, f)
os.rename(bin_cache_path + ".tmp", bin_cache_path)
self.bin_cache[key] = LoadedBinary(device, binary)
return False
def _compile(self, arg_types, device, attributes, constants, num_warps, num_stages, extern_libs):
# create IR module
context = _triton.ir.context()
# get just-in-time proto-type of kernel
arg_types = [Kernel._to_triton_ir(arg) for arg in arg_types]
ret_type = triton.language.void
prototype = triton.language.function_type(ret_type, arg_types)
# generate Triton-IR
# export symbols visible from self into code-generator object
gscope = self.__globals__
generator = CodeGenerator(context, prototype, gscope=gscope, attributes=attributes, constants=constants, is_kernel=True)
try:
generator.visit(self.parse())
except Exception as e:
node = generator.last_node
if node is None or isinstance(e, (NotImplementedError, CompilationError)):
raise e
raise CompilationError(self.src, node) from e
# Compile to machine code
if torch.version.hip is None:
backend = _triton.runtime.backend.CUDA
else:
backend = _triton.runtime.backend.ROCM
name, asm, shared_mem = _triton.code_gen.compile_ttir(backend, generator.module, device, num_warps, num_stages, extern_libs)
max_shared_memory = _triton.runtime.max_shared_memory(backend, device)
if shared_mem > max_shared_memory:
raise OutOfResources(shared_mem, max_shared_memory, "shared memory")
return Binary(backend, name, asm, shared_mem, num_warps)
def __getitem__(self, grid):
return Launcher(self._init_kernel(), grid)
def __repr__(self):
return f"JITFunction({self.module}:{self.fn.__name__})"
class Config:
"""
An object that represents a possible kernel configuration for the auto-tuner to try.
:ivar kwargs: a dictionary of meta-parameters to pass to the kernel as keyword arguments.
:type kwargs: dict[str, Any]
:ivar num_warps: the number of warps to use for the kernel when compiled for GPUs. For example, if
`num_warps=8`, then each kernel instance will be automatically parallelized to
cooperatively execute using `8 * 32 = 256` threads.
:type num_warps: int
:ivar num_stages: the number of stages that the compiler should use when software-pipelining loops.
Mostly useful for matrix multiplication workloads on SM80+ GPUs.
:type num_stages: int
:ivar pre_hook: a function that will be called before the kernel is called. It receives the
kernel's launch arguments (a dict mapping argument names to values) as its only parameter.
"""
def __init__(self, kwargs, num_warps=4, num_stages=2, pre_hook=None):
self.kwargs = kwargs
self.num_warps = num_warps
self.num_stages = num_stages
self.pre_hook = pre_hook
def __str__(self):
res = []
for k, v in self.kwargs.items():
res.append(f'{k}: {v}')
res.append(f'num_warps: {self.num_warps}')
res.append(f'num_stages: {self.num_stages}')
return ', '.join(res)
def autotune(configs, key, prune_configs_by=None, reset_to_zero=None):
"""
Decorator for auto-tuning a :code:`triton.jit`'d function.
.. highlight:: python
.. code-block:: python
@triton.autotune(configs=[
triton.Config({'BLOCK_SIZE': 128}, num_warps=4),
triton.Config({'BLOCK_SIZE': 1024}, num_warps=8),
],
key=['x_size'] # the two above configs will be evaluated anytime
# the value of x_size changes
)
@triton.jit
def kernel(x_ptr, x_size, **META):
BLOCK_SIZE = META['BLOCK_SIZE']
:note: When all the configurations are evaluated, the kernel will run multiple times.
This means that whatever value the kernel updates will be updated multiple times.
To avoid this undesired behavior, you can use the `reset_to_zero` argument, which
resets the value of the provided tensors to zero before running any configuration.
:param configs: a list of :code:`triton.Config` objects
:type configs: list[triton.Config]
:param key: a list of argument names whose change in value will trigger the evaluation of all provided configs.
:type key: list[str]
:param prune_configs_by: a dict of functions that are used to prune configs, fields:
'perf_model': performance model used to predict the running time with different configs; returns the running time
'top_k': number of configs to bench
'early_config_prune' (optional): a function used to do early pruning (e.g. by num_stages). It takes configs: List[Config] as its input and returns the pruned configs.
:param reset_to_zero: a list of argument names whose value will be reset to zero before evaluating any configs.
:type reset_to_zero: list[str]
"""
def decorator(fn):
def wrapper(kernel):
return Autotuner(kernel, fn.arg_names, configs, key, reset_to_zero, prune_configs_by)
fn.kernel_decorators.append(wrapper)
return fn
return decorator
def heuristics(values):
"""
Decorator for specifying how the values of certain meta-parameters may be computed.
This is useful for cases where auto-tuning is prohibitively expensive, or just not applicable.
.. highlight:: python
.. code-block:: python
@triton.heuristics(values={'BLOCK_SIZE': lambda args: 2 ** int(math.ceil(math.log2(args[1])))})
@triton.jit
def kernel(x_ptr, x_size, **META):
BLOCK_SIZE = META['BLOCK_SIZE'] # smallest power-of-two >= x_size
:param values: a dictionary of meta-parameter names and functions that compute the value of the meta-parameter.
each such function takes a list of positional arguments as input.
:type values: dict[str, Callable[[list[Any]], Any]]
"""
def decorator(fn):
def wrapper(kernel):
def fun(*args, **meta):
for v, heur in values.items():
assert v not in meta
meta[v] = heur({**dict(zip(fn.arg_names, args)), **meta})
return kernel(*args, **meta)
return fun
fn.kernel_decorators.append(wrapper)
return fn
return decorator
def jit(*args, **kwargs):
"""
Decorator for JIT-compiling a function using the Triton compiler.
:note: When a jit'd function is called, :code:`torch.tensor` arguments are implicitly converted to pointers using the :code:`.data_ptr()` method.
:note: This function will be compiled and run on the GPU. It will only have access to:
* python primitives,
* objects within the triton.language package,
* arguments to this function,
* other jit'd functions
:param fn: the function to be jit-compiled
:type fn: Callable
"""
if args:
assert len(args) == 1
assert callable(args[0])
return JITFunction(args[0], **kwargs)
else:
def decorator(fn):
return JITFunction(fn, **kwargs)
return decorator
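# A minimal usage sketch of the decorator above (assumes the public `triton` /
# `triton.language` packages and a CUDA device; all names below are illustrative):
def _example_jit_usage():
    import torch
    import triton
    import triton.language as tl

    @triton.jit
    def add_kernel(x_ptr, y_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
        pid = tl.program_id(axis=0)
        offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        x = tl.load(x_ptr + offsets, mask=mask)
        y = tl.load(y_ptr + offsets, mask=mask)
        tl.store(out_ptr + offsets, x + y, mask=mask)

    x = torch.randn(1024, device='cuda')
    y = torch.randn(1024, device='cuda')
    out = torch.empty_like(x)
    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']),)
    add_kernel[grid](x, y, out, x.numel(), BLOCK_SIZE=256)
    return out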
######
# class ForwardDeclaration:
# def __init__(self, name, ret_ty, arg_tys) -> None:
# self.name = name
# self.ret_ty = ret_ty
# self.arg_tys = arg_tys
# def forward_declare(name, ret_ty, arg_tys):
# return ForwardDeclaration(name, ret_ty, arg_tys)
######
def cdiv(x, y):
return (x + y - 1) // y
def next_power_of_2(n):
"""Return the smallest power of 2 greater than or equal to n"""
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
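# For example: next_power_of_2(1) == 1, next_power_of_2(17) == 32, next_power_of_2(64) == 64.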
######
class TensorWrapper:
def __init__(self, base, dtype):
self.dtype = dtype
self.base = base
self.is_cuda = base.is_cuda
self.device = base.device
def data_ptr(self):
return self.base.data_ptr()
def __str__(self) -> str:
return f'TensorWrapper[{self.dtype}]({self.base})'
def reinterpret(tensor, dtype):
if isinstance(tensor, TensorWrapper):
if dtype == tensor.base.dtype:
# Reinterpreting to the original interpretation; return the base.
return tensor.base
else:
# Reinterpreting a wrapped tensor to a different type.
return TensorWrapper(tensor.base, dtype)
elif isinstance(tensor, torch.Tensor):
# A new wrapper is needed around an unwrapped tensor.
return TensorWrapper(tensor, dtype)
else:
raise TypeError(f'Cannot reinterpret a {type(tensor)}.')
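# A minimal usage sketch of TensorWrapper/reinterpret (assumes a CUDA device and the
# `triton.language` dtype objects; purely illustrative):
def _example_reinterpret_usage():
    import torch
    import triton.language as tl
    buf = torch.empty(1024, dtype=torch.int8, device='cuda')
    u8_view = reinterpret(buf, tl.uint8)   # same storage, different element type
    return u8_view.data_ptr() == buf.data_ptr()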
| triton-master | python/triton/code_gen.py |
import functools
import os
import subprocess
import sys
from contextlib import contextmanager
import torch
import triton._C.libtriton.triton as _triton
from .code_gen import OutOfResources
try:
import triton._C.libtriton.cutlass as _cutlass
has_cutlass = True
except ImportError:
_cutlass = None
has_cutlass = False
def catch_oor(kernel, pytest_handle=None):
try:
res = kernel()
except OutOfResources as e:
if pytest_handle:
pytest_handle.skip(str(e))
return None
return res
def sparsify_tensor(x, mask, block):
ret = torch.empty((x.size(0), mask.sum(), block, block), dtype=x.dtype, device=x.device)
for idx, (h, i, j) in enumerate(zip(*mask.nonzero(as_tuple=True))):
ret[:, idx, :, :] = x[:, h, i * block:(i + 1) * block, j * block:(j + 1) * block]
return ret
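# A minimal usage sketch of sparsify_tensor (shapes below are arbitrary placeholders):
def _example_sparsify_usage():
    block = 16
    x = torch.randn(2, 4, 64, 64)                                  # (batch, heads, M, N)
    layout = torch.randint(0, 2, (4, 64 // block, 64 // block))    # one flag per block
    return sparsify_tensor(x, layout, block)                       # (batch, layout.sum(), block, block)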
def make_pair(shape, device="cuda", alpha=1e-2, beta=0., trans=False, data=None):
if data is None:
data = torch.randn(shape, dtype=torch.float32, device=device)
ref_ret = data
ref_ret = ref_ret * alpha + beta
ref_ret = ref_ret.half().float()
if trans:
ref_ret = ref_ret.t().requires_grad_()
ref_ret = ref_ret.detach().requires_grad_()
tri_ret = ref_ret.clone().detach().requires_grad_()
return ref_ret, tri_ret
def cutlass_matmul(a, b):
if _cutlass is None:
raise RuntimeError("Cannot find cutlass library")
M, N = a.shape[0], b.shape[1]
Ka, Kb = a.shape[1], b.shape[0]
assert Ka == Kb
assert a.dtype == b.dtype
assert a.device == b.device
# allocate output
c = torch.empty_strided((M, N), (1, M), dtype=a.dtype, device=a.device)
# run function
dtype = str(a.dtype).split('.')[-1]
_cutlass.matmul(a.data_ptr(), b.data_ptr(), c.data_ptr(),
M, N, Ka,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
dtype, dtype, dtype,
a.device.index, torch.cuda.current_stream(a.device).cuda_stream)
return c
def mask_tensor(x, mask, block, value=0):
ret = x.clone()
for h, i, j in zip(*(mask == 0).nonzero(as_tuple=True)):
ret[:, h, i * block:(i + 1) * block, j * block:(j + 1) * block] = value
return ret
def assert_almost_equal(x, y, decimal=2, err_msg=''):
import numpy.testing as npt
if isinstance(x, torch.Tensor):
if x.dtype == torch.bfloat16:
x = x.float()
x = x.cpu().detach().numpy()
if isinstance(y, torch.Tensor):
if y.dtype == torch.bfloat16:
y = y.float()
y = y.cpu().detach().numpy()
npt.assert_array_almost_equal(x, y, err_msg=err_msg, decimal=decimal)
def allclose(x, y, tol=1e-2):
if x.dtype != y.dtype:
raise RuntimeError(f'{x.dtype} did not match with {y.dtype}')
if x.shape != y.shape:
raise RuntimeError(f'{x.shape} did not match with {y.shape}')
if x.dtype == torch.bool:
return torch.sum(x ^ y) == 0
if x.dtype in [torch.int8, torch.int16, torch.int32, torch.int64]:
tol = 0
diff = abs(x - y)
x_max = torch.max(x)
y_max = torch.max(y)
err = torch.max(diff) / torch.max(x_max, y_max)
return err <= tol
def nvsmi(attrs):
attrs = ','.join(attrs)
cmd = ['nvidia-smi', '-i', '0', '--query-gpu=' + attrs, '--format=csv,noheader,nounits']
out = subprocess.check_output(cmd)
ret = out.decode(sys.stdout.encoding).split(',')
ret = [int(x) for x in ret]
return ret
def do_bench(fn, warmup=25, rep=100, grad_to_none=None, percentiles=[0.5, 0.2, 0.8], record_clocks=False):
"""
Benchmark the runtime of the provided function. By default, return the median runtime of :code:`fn` along with
the 20-th and 80-th performance percentile.
:param fn: Function to benchmark
:type fn: Callable
:param warmup: Warmup time (in ms)
:type warmup: int
:param rep: Repetition time (in ms)
:type rep: int
:param grad_to_none: Reset the gradient of the provided tensor to None
:type grad_to_none: torch.tensor, optional
:param percentiles: Performance percentile to return in addition to the median.
:type percentiles: list[float]
"""
# Estimate the runtime of the function
fn()
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for _ in range(5):
fn()
end_event.record()
torch.cuda.synchronize()
estimate_ms = start_event.elapsed_time(end_event) / 5
# compute number of warmup and repeat
n_warmup = max(1, int(warmup / estimate_ms))
n_repeat = max(1, int(rep / estimate_ms))
# We maintain a buffer of 256 MB that we clear
# before each kernel call to make sure that the L2
# doesn't contain any input data before the run
start_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
end_event = [torch.cuda.Event(enable_timing=True) for i in range(n_repeat)]
cache = torch.empty(int(256e6), dtype=torch.int8, device='cuda')
# Warm-up
for _ in range(n_warmup):
fn()
# Benchmark
for i in range(n_repeat):
# we don't want `fn` to accumulate gradient values
# if it contains a backward pass. So we clear the
# provided gradients
if grad_to_none is not None:
for x in grad_to_none:
x.grad = None
# we clear the L2 cache before each run
cache.zero_()
# record time of `fn`
start_event[i].record()
fn()
end_event[i].record()
# Record clocks
torch.cuda.synchronize()
times = torch.tensor([s.elapsed_time(e) for s, e in zip(start_event, end_event)])
if percentiles:
percentiles = torch.quantile(times, torch.tensor(percentiles)).tolist()
return tuple(percentiles)
else:
return torch.mean(times).item()
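# A minimal usage sketch of do_bench (assumes a CUDA device; the matmul is just a
# stand-in workload). With the default `percentiles`, three timings come back in ms:
def _example_do_bench_usage():
    a = torch.randn(1024, 1024, device='cuda', dtype=torch.float16)
    b = torch.randn(1024, 1024, device='cuda', dtype=torch.float16)
    p50, p20, p80 = do_bench(lambda: torch.matmul(a, b))
    return p50, p20, p80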
class Benchmark:
"""
This class is used by the :code:`perf_report` function to generate line plots with a concise API.
"""
def __init__(
self,
x_names,
x_vals,
line_arg,
line_vals,
line_names,
plot_name,
args,
xlabel='',
ylabel='',
x_log=False,
y_log=False,
color=None,
styles=None,
):
"""
Constructor
:param x_names: Name of the arguments that should appear on the x axis of the plot. If the list contains more than one element, all the arguments are assumed to have the same value.
:type x_names: List[str]
:param x_vals: List of values to use for the arguments in :code:`x_names`.
:type x_vals: List[Any]
:param line_arg: Argument name for which different values correspond to different lines in the plot.
:type line_arg: str
:param line_vals: List of values to use for the arguments in :code:`line_arg`.
:type line_vals: List[str]
:param line_names: Label names for the different lines.
:type line_names: List[str]
:param plot_name: Name of the plot.
:type plot_name: str
:param args: List of arguments to remain fixed throughout the benchmark.
:type args: List[str]
:param xlabel: Label for the x axis of the plot.
:type xlabel: str, optional
:param ylabel: Label for the y axis of the plot.
:type ylabel: str, optional
:param x_log: Whether the x axis should be log scale.
:type x_log: bool, optional
:param y_log: Whether the y axis should be log scale.
:type y_log: bool, optional
"""
self.x_names = x_names
self.x_vals = x_vals
self.x_log = x_log
self.line_arg = line_arg
self.line_vals = line_vals
self.line_names = line_names
self.y_log = y_log
self.styles = styles
# plot info
self.xlabel = xlabel
self.ylabel = ylabel
self.plot_name = plot_name
self.args = args
class Mark:
def __init__(self, fn, benchmarks):
self.fn = fn
self.benchmarks = benchmarks
def _run(self, bench, save_path, show_plots, print_data):
import os
import matplotlib.pyplot as plt
import pandas as pd
y_mean = bench.line_names
y_min = [f'{x}-min' for x in bench.line_names]
y_max = [f'{x}-max' for x in bench.line_names]
df = pd.DataFrame(columns=[bench.x_names[0]] + y_mean + y_min + y_max)
for x in bench.x_vals:
x_args = {x_name: x for x_name in bench.x_names}
row_mean, row_min, row_max = [], [], []
for y in bench.line_vals:
ret = self.fn(**x_args, **{bench.line_arg: y}, **bench.args)
try:
y_mean, y_min, y_max = ret
except TypeError:
y_mean, y_min, y_max = ret, None, None
row_mean += [y_mean]
row_min += [y_min]
row_max += [y_max]
df.loc[len(df)] = [x] + row_mean + row_min + row_max
if bench.plot_name:
plt.figure()
ax = plt.subplot()
x = bench.x_names[0]
for i, y in enumerate(bench.line_names):
y_min, y_max = df[y + '-min'], df[y + '-max']
col = bench.styles[i][0] if bench.styles else None
sty = bench.styles[i][1] if bench.styles else None
ax.plot(df[x], df[y], label=y, color=col, ls=sty)
if y_min is not None and y_max is not None:
ax.fill_between(df[x], y_min, y_max, alpha=0.15, color=col)
ax.legend()
xlabel = bench.xlabel if bench.xlabel else " = ".join(bench.x_names)
ax.set_xlabel(xlabel)
ax.set_ylabel(bench.ylabel)
# ax.set_title(bench.plot_name)
ax.set_xscale("log" if bench.x_log else "linear")
ax.set_yscale("log" if bench.y_log else "linear")
if show_plots:
plt.show()
if save_path:
plt.savefig(os.path.join(save_path, f"{bench.plot_name}.png"))
df = df[[bench.x_names[0]] + bench.line_names]
if print_data:
print(bench.plot_name + ':')
print(df)
if save_path:
df.to_csv(os.path.join(save_path, f"{bench.plot_name}.csv"), float_format='%.1f', index=False)
def run(self, show_plots=False, print_data=False, save_path=''):
has_single_bench = isinstance(self.benchmarks, Benchmark)
benchmarks = [self.benchmarks] if has_single_bench else self.benchmarks
if save_path:
html = open(os.path.join(save_path, "results.html"), "w")
html.write("<html><body>\n")
for bench in benchmarks:
self._run(bench, save_path, show_plots, print_data)
if save_path:
html.write(f"<image src=\"{bench.plot_name}.png\"/>\n")
if save_path:
html.write("</body></html>\n")
def perf_report(benchmarks):
"""
Mark a function for benchmarking. The benchmark can then be executed by using the :code:`.run` method on the return value.
:param benchmarks: Benchmarking configurations.
:type benchmarks: List of :class:`Benchmark`
"""
wrapper = lambda fn: Mark(fn, benchmarks)
return wrapper
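# A minimal usage sketch of perf_report/Benchmark (assumes a CUDA device; sizes,
# labels and the plot name are arbitrary placeholders):
def _example_perf_report_usage():
    @perf_report(Benchmark(
        x_names=['N'], x_vals=[2 ** i for i in range(10, 14)],
        line_arg='provider', line_vals=['torch'], line_names=['PyTorch'],
        plot_name='softmax-performance', args={}, ylabel='ms'))
    def bench_softmax(N, provider):
        x = torch.randn(4096, N, device='cuda')
        return do_bench(lambda: torch.softmax(x, dim=-1))

    bench_softmax.run(show_plots=False, print_data=True)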
def get_dram_gbps(backend=None, device=None):
''' return DRAM bandwidth in GB/s '''
# assert backend == CUDA
if not backend:
backend = _triton.runtime.backend.CUDA
if not device:
device = torch.cuda.current_device()
mem_clock_khz = _triton.runtime.memory_clock_rate(backend, device)
bus_width = _triton.runtime.global_memory_bus_width(backend, device)
bw_gbps = mem_clock_khz * bus_width * 2 / 1e6 / 8 # In GB/s
return bw_gbps
def get_max_tensorcore_tflops(dtype: torch.dtype, backend=None, device=None, clock_rate=None):
if not backend:
backend = _triton.runtime.backend.CUDA
if not device:
device = torch.cuda.current_device()
num_subcores = _triton.runtime.num_sm(backend, device) * 4 # on recent GPUs
if not clock_rate:
clock_rate = _triton.runtime.clock_rate(backend, device) # in kHz
cc = _triton.runtime.cc(backend, device)
if cc < 80:
assert dtype == torch.float16
ops_per_sub_core = 256 # 2 4x4x4 Tensor Cores
else:
if dtype == torch.float32:
ops_per_sub_core = 256
elif dtype in [torch.float16, torch.bfloat16]:
ops_per_sub_core = 512
elif dtype == torch.int8:
ops_per_sub_core = 1024
else:
raise RuntimeError("dtype not supported")
tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9
return tflops
# create decorator that wraps test function into
# a cuda-memcheck system call
def cuda_memcheck(**target_kwargs):
def decorator(test_fn):
@functools.wraps(test_fn)
def wrapper(*args, **kwargs):
import psutil
ppid_name = psutil.Process(os.getppid()).name()
run_cuda_memcheck = target_kwargs.items() <= kwargs.items()
if run_cuda_memcheck and ppid_name != "cuda-memcheck":
path = os.path.realpath(test_fn.__globals__["__file__"])
# get path of current file
env = {"PATH": os.environ["PATH"], "PYTORCH_NO_CUDA_MEMORY_CACHING": "1"}
assert 'request' in kwargs, "memcheck'ed test must have a (possibly unused) `request` fixture"
test_id = kwargs['request'].node.callspec.id
cmd = f"{path}::{test_fn.__name__}[{test_id}]"
out = subprocess.run(["cuda-memcheck", "pytest", "-vs", cmd], capture_output=True, env=env)
assert out.returncode == 0, "cuda-memcheck returned an error: bounds checkng failed"
assert "ERROR SUMMARY: 0 errors" in str(out.stdout)
else:
test_fn(*args, **kwargs)
return wrapper
return decorator
def nvsmi_attr(attrs):
attrs = ",".join(attrs)
cmd = [
"nvidia-smi",
"-i",
"0",
"--query-gpu=" + attrs,
"--format=csv,noheader,nounits",
]
out = subprocess.check_output(cmd)
ret = out.decode(sys.stdout.encoding).split(",")
ret = [int(x) for x in ret]
return ret
@contextmanager
def set_gpu_clock(ref_sm_clock=1350, ref_mem_clock=1215):
try:
subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "1"])
subprocess.check_output(
[
"nvidia-smi",
"-i",
"0",
f"--lock-gpu-clocks={ref_sm_clock},{ref_sm_clock}",
]
)
subprocess.check_output(
[
"nvidia-smi",
"-i",
"0",
f"--lock-memory-clocks={ref_mem_clock},{ref_mem_clock}",
]
)
cur_sm_clock = nvsmi_attr(["clocks.current.sm"])[0]
cur_mem_clock = nvsmi_attr(["clocks.current.memory"])[0]
assert abs(cur_sm_clock - ref_sm_clock) < 10, f"GPU SMs must run at {ref_sm_clock} MHz"
assert abs(cur_mem_clock - ref_mem_clock) < 10, f"GPU SMs must run at {ref_mem_clock} MHz"
tflops = 1e-6 * 2 * 108 * 4 * 256 * ref_sm_clock
gbps = 640 * 2 * ref_mem_clock * 1e-3
yield tflops, gbps
finally:
subprocess.check_output(["nvidia-smi", "-i", "0", "-pm", "0"])
subprocess.check_output(["nvidia-smi", "-i", "0", "-rgc"])
subprocess.check_output(["nvidia-smi", "-i", "0", "-rmc"])
def get_max_simd_tflops(dtype: torch.dtype, backend=None, device=None):
if not backend:
backend = _triton.runtime.backend.CUDA
if not device:
device = torch.cuda.current_device()
num_subcores = _triton.runtime.num_sm(backend, device) * 4 # on recent GPUs
clock_rate = _triton.runtime.clock_rate(backend, device) # in kHz
cc = _triton.runtime.cc(backend, device)
if cc < 80:
if dtype == torch.float32:
ops_per_sub_core = 32 # 2*16
elif dtype == torch.float16:
ops_per_sub_core = 64
else:
raise RuntimeError("dtype not supported")
else:
if dtype == torch.float32:
ops_per_sub_core = 32
elif dtype in [torch.float16, torch.bfloat16]:
ops_per_sub_core = 64
else:
raise RuntimeError("dtype not supported")
tflops = num_subcores * clock_rate * ops_per_sub_core * 1e-9
return tflops
| triton-master | python/triton/testing.py |
'''
Compare cached triton kernels in 2 directories.
example:
python compare_asm.py --dir0=triton-works/ --dir1=triton-fails/ --asm=ttir \
--diff-out0=diff-works.ll --diff-out1=diff-fails.ll
'''
import argparse
import os
import pickle
parser = argparse.ArgumentParser(description="unpickle")
parser.add_argument('--dir0', dest='dir0', required=True,
help="Triton cache dir 0")
parser.add_argument('--dir1', dest='dir1', required=True,
help="Triton cache dir 1")
parser.add_argument('--asm', dest='asm',
choices=['ttir', 'llir', 'ptx', 'cubin'], required=True)
parser.add_argument('--early-stop', dest='early_stop', action='store_true',
help="Stop after first diff")
parser.set_defaults(early_stop=True)
parser.add_argument('--diff-out0', dest='diff_out0', required=True,
help="output file path for kernels in dir0")
parser.add_argument('--diff-out1', dest='diff_out1', required=True,
help="output file path for kernels in dir1")
args = parser.parse_args()
dir0 = args.dir0
dir1 = args.dir1
asm = args.asm
dir0_files = {}
dir1_files = {}
for root, _, files in os.walk(dir0):
for file in files:
if not file.endswith('.lock'):
path = os.path.join(root, file)
with open(path, 'rb') as f:
loaded_file = pickle.load(f)
bin = loaded_file['binary']
key = loaded_file['key']
info = key.split('-')[-3:] # num_warps, num_stages, signature
dict_key = bin.name + '-'.join(info)
dir0_files[dict_key] = bin.asm
for root, _, files in os.walk(dir1):
for file in files:
if not file.endswith('.lock'):
path = os.path.join(root, file)
with open(path, 'rb') as f:
loaded_file = pickle.load(f)
bin = loaded_file['binary']
key = loaded_file['key']
info = key.split('-')[-3:] # num_warps, num_stages, signature
dict_key = bin.name + '-'.join(info)
dir1_files[dict_key] = bin.asm
diff_keys = []
for key in dir0_files:
asm0 = dir0_files[key]
if key not in dir1_files:
continue
asm1 = dir1_files[key]
if asm0[asm] != asm1[asm]:
diff_keys.append(key)
if args.early_stop:
diff_keys = diff_keys[:1]
if diff_keys:
with open(args.diff_out0, 'w') as f0, open(args.diff_out1, 'w') as f1:
for key in diff_keys:
f0.write(f'{asm} mismatch at {key}')
f0.write(dir0_files[key][asm])
f0.write('\n')
f1.write(f'{asm} mismatch at {key}')
f1.write(dir1_files[key][asm])
f1.write('\n')
| triton-master | python/triton/tools/compare_asm.py |
# MIT License
# Copyright (c) 2020 Da Yan @ HKUST
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import subprocess
FLINE_RE = re.compile(r'\s*/\*\w{4}\*/\s*([^;]*;)\s*/\* 0x(\w{16}) \*/\s*')
SLINE_RE = re.compile(r'\s*/\* 0x(\w{16}) \*/\s*')
FNAME_RE = re.compile(r'\s*Function : (\w+)\s*')
BRA_RE = re.compile(r'(.*BRA(?:\.U)? )(0x\w+);')
def parseCtrl(sline):
enc = int(SLINE_RE.match(sline).group(1), 16)
stall = (enc >> 41) & 0xf
yld = (enc >> 45) & 0x1
wrtdb = (enc >> 46) & 0x7
readb = (enc >> 49) & 0x7
watdb = (enc >> 52) & 0x3f
yld_str = 'Y' if yld == 0 else '-'
wrtdb_str = '-' if wrtdb == 7 else str(wrtdb)
readb_str = '-' if readb == 7 else str(readb)
watdb_str = '--' if watdb == 0 else f'{watdb:02d}'
return f'{watdb_str}:{readb_str}:{wrtdb_str}:{yld_str}:{stall:x}'
def processSassLines(fline, sline, labels):
asm = FLINE_RE.match(fline).group(1)
# Remove trailing space
if asm.endswith(" ;"):
asm = asm[:-2] + ";"
ctrl = parseCtrl(sline)
# BRA target address
if BRA_RE.match(asm) is not None:
target = int(BRA_RE.match(asm).group(2), 16)
if target not in labels:
    labels[target] = len(labels)
return (f'{ctrl}', f'{asm}')
def extract(file_path, fun):
if fun is None:
sass_str = subprocess.check_output(["cuobjdump", "-sass", file_path])
else:
sass_str = subprocess.check_output(["cuobjdump", "-fun", fun, "-sass", file_path])
sass_lines = sass_str.splitlines()
line_idx = 0
while line_idx < len(sass_lines):
line = sass_lines[line_idx].decode()
# format:
# function : <function_name>
# .headerflags: ...
# /*0000*/ asmstr /*0x...*/
# /*0x...*/
# Looking for new function header (function: <name>)
while FNAME_RE.match(line) is None:
line_idx += 1
if line_idx < len(sass_lines):
line = sass_lines[line_idx].decode()
else:
return
fname = FNAME_RE.match(line).group(1)
ret = ''
ret += f'Function:{fname}\n'
line_idx += 2 # bypass .headerflags
line = sass_lines[line_idx].decode()
# Remapping address to label
labels = {} # address -> label_idx
# store sass asm in buffer and them print them (for labels)
# (ctrl, asm)
asm_buffer = []
while FLINE_RE.match(line) is not None:
# First line (Offset ASM Encoding)
fline = sass_lines[line_idx].decode()
line_idx += 1
# Second line (Encoding)
sline = sass_lines[line_idx].decode()
line_idx += 1
asm_buffer.append(processSassLines(fline, sline, labels))
# peek the next line
line = sass_lines[line_idx].decode()
# Print sass
# label naming convention: LBB#i
for idx, (ctrl, asm) in enumerate(asm_buffer):
# Print label if this is BRA target
offset = idx * 16
if offset in labels:
label_name = f'LBB{labels[offset]}'
ret += f'{label_name}:\n'
ret += ctrl + '\t'
# if this is BRA, remap offset to label
if BRA_RE.match(asm):
target = int(BRA_RE.match(asm).group(2), 16)
target_name = f'LBB{labels[target]}'
asm = BRA_RE.sub(rf'\1{target_name};', asm)
ret += asm + '\n'
ret += '\n'
return ret
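# A minimal usage sketch of extract (requires `cuobjdump` on PATH; the path below is
# a placeholder for a compiled CUDA binary):
def _example_extract_usage(cubin_path='/tmp/kernel.cubin'):
    sass = extract(cubin_path, None)   # fun=None disassembles every function in the binary
    print(sass)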
| triton-master | python/triton/tools/disasm.py |
import argparse
import subprocess
from abc import ABC, abstractmethod
class Symbol:
def __init__(self, name: str, op_name: str, ret_type: str, arg_names: list, arg_types: list) -> None:
'''
A symbol is a function declaration.
:param name: name of the symbol
:param op_name: name of the operation
:param ret_type: return type of the operation
:param arg_names: names of the arguments
:param arg_types: types of the arguments
'''
self._name = name
self._op_name = op_name
self._ret_type = ret_type
self._arg_names = arg_names
self._arg_types = arg_types
@property
def name(self):
return self._name
@property
def op_name(self):
return self._op_name
@property
def ret_type(self):
return self._ret_type
@property
def arg_names(self):
return self._arg_names
@property
def arg_types(self):
return self._arg_types
def convert_type(type_str):
if type_str == "i32":
return "int32"
elif type_str == "u32":
return "uint32"
elif type_str == "i64":
return "int64"
elif type_str == "u64":
return "uint64"
elif type_str == "float":
return "fp32"
elif type_str == "double":
return "fp64"
else:
# ignore other types, such as pointer types
return None
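# For example: convert_type("i32") -> "int32", convert_type("float") -> "fp32",
# and anything unrecognized (e.g. pointer types) falls through to None.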
def to_unsigned(type_str):
if type_str == "int32":
return "uint32"
elif type_str == "int64":
return "uint64"
else:
return type_str
class ExternLibrary(ABC):
def __init__(self, name: str, path: str, format: bool = True, grouping: bool = True) -> None:
'''
Abstract class for extern library.
:param name: name of the library
:param path: path of the library
:param format: whether to format the generated stub file
:param grouping: whether to group symbols that share an operation name
'''
self._name = name
self._path = path
self._symbols = {}
self._format = format
self._grouping = grouping
@property
def name(self):
return self._name
@property
def path(self):
return self._path
@property
def symbols(self):
return self._symbols
@property
def grouping(self):
return self._grouping
@abstractmethod
def parse_symbols(self, input_file):
pass
@abstractmethod
def _output_stubs(self) -> str:
pass
def generate_stub_file(self, output_dir):
file_str = self._output_stubs()
if file_str is None or len(file_str) == 0:
raise Exception("file_str is empty")
output_file = f"{output_dir}/{self._name}.py"
with open(output_file, "w") as f:
f.write(file_str)
f.close()
if self._format:
subprocess.Popen(["autopep8", "-a", "-r", "-i", output_file],
stdout=subprocess.PIPE).communicate()
subprocess.Popen(["isort", output_file], stdout=subprocess.PIPE).communicate()
class Libdevice(ExternLibrary):
def __init__(self, path) -> None:
'''
Constructor for Libdevice.
:param path: path of the libdevice library
'''
super().__init__("libdevice", path)
self._symbol_groups = {}
def _extract_symbol(self, line):
# Extract symbols from line in the following format:
# "define [internal] <ret_type> @<name>(<arg_types>,)"
entries = line.split("@")
ret_str = entries[0]
func_str = entries[1]
# Get ret_type, skip internal symbols
ret_strs = ret_str.split()
if ret_strs[1] == "internal":
return None
ret_type = convert_type(ret_strs[1])
if ret_type is None:
return None
# Get function name
func_strs = func_str.split("(")
func_name = func_strs[0].replace("@", "")
op_name = func_name.replace("__nv_", "")
# Get arg_types
arg_strs = func_strs[1].split(",")
arg_types = []
arg_names = []
for i, arg_str in enumerate(arg_strs):
arg_type = convert_type(arg_str.split()[0])
if arg_type is None:
return None
arg_name = 'arg' + str(i)
arg_types.append(arg_type)
arg_names.append(arg_name)
if op_name == "sad":
# Special case for sad, where the last argument is an unsigned int
arg_types[-1] = to_unsigned(arg_types[-1])
elif op_name.startswith("u"):
# LLVM does not differentiate between signed and unsigned integer types.
# We have to convert the types to unsigned
ret_type = to_unsigned(ret_type)
for i, arg_type in enumerate(arg_types):
arg_types[i] = to_unsigned(arg_type)
return Symbol(func_name, op_name, ret_type, arg_names, arg_types)
def _group_symbols(self):
symbol_set = {}
for symbol in self._symbols.values():
op_name = symbol.op_name
symbol_set[op_name] = symbol
# The following cases are grouped together:
# op_name, <u/ull/ll>op_name<ll/f/i>
for symbol in self._symbols.values():
op_name = symbol.op_name
if "max" in op_name:
op_name = "max"
elif "min" in op_name:
op_name = "min"
elif "abs" in op_name:
op_name = "abs"
elif "pow" in op_name and "fast" in op_name:
op_name = "pow"
elif "round" in op_name:
if "llround" in op_name:
op_name = "llround"
else:
op_name = "round"
elif "rint" in op_name:
if "llrint" in op_name:
op_name = "llrint"
else:
op_name = "rint"
elif op_name.startswith("ull"):
if "2" not in op_name:
# e.g., ullmax->max
op_name = op_name[3:]
else:
# e.g., ull2double->ll2double
op_name = op_name[1:]
elif op_name.startswith("u"):
if "2" not in op_name:
# e.g., uhadd->hadd
op_name = op_name[1:]
else:
# e.g., uint2double_rn->int2double_rn
op_name = op_name[1:]
elif op_name.startswith("ll"):
if "2" not in op_name:
# e.g., llmax->max
op_name = op_name[2:]
elif op_name.endswith("ll"):
op_name = op_name[:-2]
elif op_name.endswith("f"):
op_name = op_name[:-1]
if op_name in symbol_set:
# Update op_name only if there's an existing symbol
symbol._op_name = op_name
else:
op_name = symbol._op_name
if op_name in self._symbol_groups:
self._symbol_groups[op_name].append(symbol)
else:
self._symbol_groups[op_name] = [symbol]
def parse_symbols(self, input_file):
if len(self.symbols) > 0:
return
output = subprocess.check_output(["grep", "define", input_file]).decode().splitlines()
for line in output:
symbol = self._extract_symbol(line)
if symbol is None:
continue
self._symbols[symbol.name] = symbol
self._group_symbols()
def _output_stubs(self):
# Generate python functions in the following format:
# @extern.extern
# def <op_name>(<args>, _builder=None):
# arg_type_symbol_dict = {[arg_type]: {(symbol, ret_type)}}
# return extern.dispatch("libdevice", <path>, <args>, <arg_type_symbol_dict>, _builder)
import_str = "from . import core, extern\n"
import_str += "import os\n"
header_str = "LIBDEVICE_PATH = os.path.dirname(os.path.abspath(__file__)) + \"/libdevice.10.bc\"\n"
func_str = ""
for symbols in self._symbol_groups.values():
func_str += "@extern.extern\n"
func_name_str = f"def {symbols[0].op_name}("
for arg_name in symbols[0].arg_names:
func_name_str += f"{arg_name}, "
func_name_str += "_builder=None):\n"
return_str = f"\treturn extern.elementwise(\"{self._name}\", LIBDEVICE_PATH, ["
for arg_name in symbols[0].arg_names:
return_str += f"{arg_name}, "
return_str += "], \n"
arg_type_symbol_dict_str = "{"
for symbol in symbols:
arg_type_symbol_dict_str += "("
for arg_type in symbol.arg_types:
arg_type_symbol_dict_str += f"core.dtype(\"{arg_type}\"),"
ret_type = f"core.dtype(\"{symbol.ret_type}\")"
arg_type_symbol_dict_str += "): (\"" + symbol.name + "\", " + ret_type + "),\n"
arg_type_symbol_dict_str += "}"
return_str += arg_type_symbol_dict_str
return_str += ", _builder)\n"
func_str += func_name_str + return_str + "\n"
file_str = import_str + header_str + func_str
return file_str
class LLVMDisassembler:
def __init__(self, path):
'''
Invoke llvm-dis to disassemble the given file.
:param path: path to llvm-dis
'''
self._path = path
self._ll_file = "/tmp/extern_lib.ll"
def disasm(self, lib_path):
subprocess.Popen([self._path, lib_path, "-o", self.ll_file],
stdout=subprocess.PIPE).communicate()
@property
def ll_file(self):
return self._ll_file
@property
def path(self):
return self._path
extern_libs = ["libdevice"]
def build(llvm_dis_path, lib_path, lib_name, output_dir):
'''
Interface function to build the library file.
:param llvm_dis_path: path to the llvm-dis binary
:param lib_path: path to the external library file
:param lib_name: name of the library
:param output_dir: path to the output directory
'''
if lib_name == "libdevice":
extern_lib = Libdevice(lib_path)
else:
raise Exception(f"Unknown extern library: {lib_name}")
llvm_disassembler = LLVMDisassembler(llvm_dis_path)
llvm_disassembler.disasm(lib_path)
extern_lib.parse_symbols(llvm_disassembler.ll_file)
extern_lib.generate_stub_file(output_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-llvm", dest="llvm_dis_path", help="path to llvm-dis", default="llvm-dis")
parser.add_argument("--lib-path", dest="lib_path", help="path to the extern library")
parser.add_argument("--lib-name", dest="lib_name", help="name of the extern library")
parser.add_argument("-o", dest="output_dir", help="output file path", default="/tmp/")
args = parser.parse_args()
build(args.llvm_dis_path, args.lib_path, args.lib_name, args.output_dir)
| triton-master | python/triton/tools/build_extern.py |
| triton-master | python/triton/tools/__init__.py |
import os
from . import core, extern
LIBDEVICE_PATH = os.path.dirname(
os.path.abspath(__file__)) + "/libdevice.10.bc"
@extern.extern
def clz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_clz", core.dtype("int32")),
(core.dtype("int64"),): ("__nv_clzll", core.dtype("int32")),
}, _builder)
@extern.extern
def popc(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_popc", core.dtype("int32")),
(core.dtype("int64"),): ("__nv_popcll", core.dtype("int32")),
}, _builder)
@extern.extern
def byte_perm(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("int32"), core.dtype("int32"), core.dtype("int32"),): ("__nv_byte_perm", core.dtype("int32")),
}, _builder)
@extern.extern
def min(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("int32"),): ("__nv_min", core.dtype("int32")),
(core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umin", core.dtype("uint32")),
(core.dtype("int64"), core.dtype("int64"),): ("__nv_llmin", core.dtype("int64")),
(core.dtype("uint64"), core.dtype("uint64"),): ("__nv_ullmin", core.dtype("uint64")),
(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fminf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmin", core.dtype("fp64")),
}, _builder)
@extern.extern
def max(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("int32"),): ("__nv_max", core.dtype("int32")),
(core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umax", core.dtype("uint32")),
(core.dtype("int64"), core.dtype("int64"),): ("__nv_llmax", core.dtype("int64")),
(core.dtype("uint64"), core.dtype("uint64"),): ("__nv_ullmax", core.dtype("uint64")),
(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaxf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmax", core.dtype("fp64")),
}, _builder)
@extern.extern
def mulhi(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("int32"),): ("__nv_mulhi", core.dtype("int32")),
(core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umulhi", core.dtype("uint32")),
}, _builder)
@extern.extern
def mul64hi(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int64"), core.dtype("int64"),): ("__nv_mul64hi", core.dtype("int64")),
(core.dtype("uint64"), core.dtype("uint64"),): ("__nv_umul64hi", core.dtype("uint64")),
}, _builder)
@extern.extern
def mul24(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("int32"),): ("__nv_mul24", core.dtype("int32")),
(core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umul24", core.dtype("uint32")),
}, _builder)
@extern.extern
def brev(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_brev", core.dtype("int32")),
(core.dtype("int64"),): ("__nv_brevll", core.dtype("int64")),
}, _builder)
@extern.extern
def sad(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("int32"), core.dtype("int32"), core.dtype("uint32"),): ("__nv_sad", core.dtype("int32")),
(core.dtype("uint32"), core.dtype("uint32"), core.dtype("uint32"),): ("__nv_usad", core.dtype("uint32")),
}, _builder)
@extern.extern
def abs(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_abs", core.dtype("int32")),
(core.dtype("int64"),): ("__nv_llabs", core.dtype("int64")),
(core.dtype("fp32"),): ("__nv_fabsf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_fabs", core.dtype("fp64")),
}, _builder)
@extern.extern
def floor(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_floorf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_floor", core.dtype("fp64")),
}, _builder)
@extern.extern
def rcp64h(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_rcp64h", core.dtype("fp64")),
}, _builder)
@extern.extern
def rsqrt(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_rsqrtf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_rsqrt", core.dtype("fp64")),
}, _builder)
@extern.extern
def ceil(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_ceil", core.dtype("fp64")),
(core.dtype("fp32"),): ("__nv_ceilf", core.dtype("fp32")),
}, _builder)
@extern.extern
def trunc(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_trunc", core.dtype("fp64")),
(core.dtype("fp32"),): ("__nv_truncf", core.dtype("fp32")),
}, _builder)
@extern.extern
def exp2(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_exp2f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_exp2", core.dtype("fp64")),
}, _builder)
@extern.extern
def saturatef(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_saturatef", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_rn(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_rz(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_rd(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_ru(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_ieee_rn(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_ieee_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_ieee_rz(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_ieee_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_ieee_rd(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_ieee_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmaf_ieee_ru(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_ieee_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def fma_rn(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def fma_rz(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def fma_rd(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rd", core.dtype("fp64")),
}, _builder)
@extern.extern
def fma_ru(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def fast_fdividef(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fast_fdividef", core.dtype("fp32")),
}, _builder)
@extern.extern
def fdiv_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def fdiv_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def fdiv_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def fdiv_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def frcp_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_frcp_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def frcp_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_frcp_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def frcp_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_frcp_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def frcp_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_frcp_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def fsqrt_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fsqrt_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def fsqrt_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fsqrt_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def fsqrt_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fsqrt_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def fsqrt_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fsqrt_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def ddiv_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def ddiv_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def ddiv_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rd", core.dtype("fp64")),
}, _builder)
@extern.extern
def ddiv_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def drcp_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_drcp_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def drcp_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_drcp_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def drcp_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_drcp_rd", core.dtype("fp64")),
}, _builder)
@extern.extern
def drcp_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_drcp_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def dsqrt_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_dsqrt_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def dsqrt_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_dsqrt_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def dsqrt_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_dsqrt_rd", core.dtype("fp64")),
}, _builder)
@extern.extern
def dsqrt_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_dsqrt_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def sqrt(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_sqrtf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_sqrt", core.dtype("fp64")),
}, _builder)
@extern.extern
def dadd_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def dadd_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def dadd_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rd", core.dtype("fp64")),
}, _builder)
@extern.extern
def dadd_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def dmul_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def dmul_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def dmul_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rd", core.dtype("fp64")),
}, _builder)
@extern.extern
def dmul_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def fadd_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def fadd_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmul_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmul_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def fadd_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def fadd_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmul_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def fmul_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def double2float_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2float_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def double2float_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2float_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def double2float_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2float_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def double2float_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2float_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def double2int_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2int_rn", core.dtype("int32")),
}, _builder)
@extern.extern
def double2int_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2int_rz", core.dtype("int32")),
}, _builder)
@extern.extern
def double2int_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2int_rd", core.dtype("int32")),
}, _builder)
@extern.extern
def double2int_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2int_ru", core.dtype("int32")),
}, _builder)
@extern.extern
def double2uint_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2uint_rn", core.dtype("int32")),
}, _builder)
@extern.extern
def double2uint_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2uint_rz", core.dtype("int32")),
}, _builder)
@extern.extern
def double2uint_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2uint_rd", core.dtype("int32")),
}, _builder)
@extern.extern
def double2uint_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2uint_ru", core.dtype("int32")),
}, _builder)
@extern.extern
def int2double_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_int2double_rn", core.dtype("fp64")),
(core.dtype("uint32"),): ("__nv_uint2double_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def float2int_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2int_rn", core.dtype("int32")),
}, _builder)
@extern.extern
def float2int_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2int_rz", core.dtype("int32")),
}, _builder)
@extern.extern
def float2int_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2int_rd", core.dtype("int32")),
}, _builder)
@extern.extern
def float2int_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2int_ru", core.dtype("int32")),
}, _builder)
@extern.extern
def float2uint_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2uint_rn", core.dtype("int32")),
}, _builder)
@extern.extern
def float2uint_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2uint_rz", core.dtype("int32")),
}, _builder)
@extern.extern
def float2uint_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2uint_rd", core.dtype("int32")),
}, _builder)
@extern.extern
def float2uint_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2uint_ru", core.dtype("int32")),
}, _builder)
@extern.extern
def int2float_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_int2float_rn", core.dtype("fp32")),
(core.dtype("uint32"),): ("__nv_uint2float_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def int2float_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_int2float_rz", core.dtype("fp32")),
(core.dtype("uint32"),): ("__nv_uint2float_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def int2float_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_int2float_rd", core.dtype("fp32")),
(core.dtype("uint32"),): ("__nv_uint2float_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def int2float_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_int2float_ru", core.dtype("fp32")),
(core.dtype("uint32"),): ("__nv_uint2float_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def hiloint2double(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("int32"),): ("__nv_hiloint2double", core.dtype("fp64")),
}, _builder)
@extern.extern
def double2loint(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2loint", core.dtype("int32")),
}, _builder)
@extern.extern
def double2hiint(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2hiint", core.dtype("int32")),
}, _builder)
@extern.extern
def float2ll_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ll_rn", core.dtype("int64")),
}, _builder)
@extern.extern
def float2ll_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ll_rz", core.dtype("int64")),
}, _builder)
@extern.extern
def float2ll_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ll_rd", core.dtype("int64")),
}, _builder)
@extern.extern
def float2ll_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ll_ru", core.dtype("int64")),
}, _builder)
@extern.extern
def float2ull_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ull_rn", core.dtype("int64")),
}, _builder)
@extern.extern
def float2ull_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ull_rz", core.dtype("int64")),
}, _builder)
@extern.extern
def float2ull_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ull_rd", core.dtype("int64")),
}, _builder)
@extern.extern
def float2ull_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float2ull_ru", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ll_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ll_rn", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ll_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ll_rz", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ll_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ll_rd", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ll_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ll_ru", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ull_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ull_rn", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ull_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ull_rz", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ull_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ull_rd", core.dtype("int64")),
}, _builder)
@extern.extern
def double2ull_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double2ull_ru", core.dtype("int64")),
}, _builder)
@extern.extern
def ll2float_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2float_rn", core.dtype("fp32")),
(core.dtype("uint64"),): ("__nv_ull2float_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def ll2float_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2float_rz", core.dtype("fp32")),
(core.dtype("uint64"),): ("__nv_ull2float_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def ll2float_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2float_rd", core.dtype("fp32")),
(core.dtype("uint64"),): ("__nv_ull2float_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def ll2float_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2float_ru", core.dtype("fp32")),
(core.dtype("uint64"),): ("__nv_ull2float_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def ll2double_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2double_rn", core.dtype("fp64")),
(core.dtype("uint64"),): ("__nv_ull2double_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def ll2double_rz(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2double_rz", core.dtype("fp64")),
(core.dtype("uint64"),): ("__nv_ull2double_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def ll2double_rd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2double_rd", core.dtype("fp64")),
(core.dtype("uint64"),): ("__nv_ull2double_rd", core.dtype("fp64")),
}, _builder)
@extern.extern
def ll2double_ru(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_ll2double_ru", core.dtype("fp64")),
(core.dtype("uint64"),): ("__nv_ull2double_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def int_as_float(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_int_as_float", core.dtype("fp32")),
(core.dtype("uint32"),): ("__nv_uint_as_float", core.dtype("fp32")),
}, _builder)
@extern.extern
def float_as_int(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float_as_int", core.dtype("int32")),
}, _builder)
@extern.extern
def float_as_uint(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_float_as_uint", core.dtype("int32")),
}, _builder)
@extern.extern
def longlong_as_double(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int64"),): ("__nv_longlong_as_double", core.dtype("fp64")),
}, _builder)
@extern.extern
def double_as_longlong(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_double_as_longlong", core.dtype("int64")),
}, _builder)
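# The *_as_* wrappers above are bit-level reinterpretations, not numeric conversions:
# for example, int_as_float(0x3f800000) views the 32-bit pattern as fp32 and yields 1.0,
# and float_as_int(1.0) returns 0x3f800000.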
@extern.extern
def fast_sinf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_sinf", core.dtype("fp32")),
}, _builder)
@extern.extern
def fast_cosf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_cosf", core.dtype("fp32")),
}, _builder)
@extern.extern
def fast_log2f(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_log2f", core.dtype("fp32")),
}, _builder)
@extern.extern
def fast_logf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_logf", core.dtype("fp32")),
}, _builder)
@extern.extern
def fast_expf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_expf", core.dtype("fp32")),
}, _builder)
@extern.extern
def fast_tanf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_tanf", core.dtype("fp32")),
}, _builder)
@extern.extern
def fast_exp10f(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_exp10f", core.dtype("fp32")),
}, _builder)
@extern.extern
def fast_log10f(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_fast_log10f", core.dtype("fp32")),
}, _builder)
@extern.extern
def pow(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fast_powf", core.dtype("fp32")),
(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_powf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_pow", core.dtype("fp64")),
}, _builder)
@extern.extern
def hadd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("int32"),): ("__nv_hadd", core.dtype("int32")),
(core.dtype("uint32"), core.dtype("uint32"),): ("__nv_uhadd", core.dtype("uint32")),
}, _builder)
@extern.extern
def rhadd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("int32"),): ("__nv_rhadd", core.dtype("int32")),
(core.dtype("uint32"), core.dtype("uint32"),): ("__nv_urhadd", core.dtype("uint32")),
}, _builder)
@extern.extern
def fsub_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def fsub_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rz", core.dtype("fp32")),
}, _builder)
@extern.extern
def fsub_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rd", core.dtype("fp32")),
}, _builder)
@extern.extern
def fsub_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_ru", core.dtype("fp32")),
}, _builder)
@extern.extern
def frsqrt_rn(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_frsqrt_rn", core.dtype("fp32")),
}, _builder)
@extern.extern
def ffs(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("int32"),): ("__nv_ffs", core.dtype("int32")),
(core.dtype("int64"),): ("__nv_ffsll", core.dtype("int32")),
}, _builder)
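# __nv_ffs / __nv_ffsll follow the C ffs() convention: the result is the 1-based index of
# the least significant set bit (e.g. ffs(0b1000) == 4), and 0 when the input is 0.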
@extern.extern
def rint(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_rintf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_rint", core.dtype("fp64")),
}, _builder)
@extern.extern
def llrint(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_llrintf", core.dtype("int64")),
(core.dtype("fp64"),): ("__nv_llrint", core.dtype("int64")),
}, _builder)
@extern.extern
def nearbyint(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_nearbyintf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_nearbyint", core.dtype("fp64")),
}, _builder)
@extern.extern
def isnanf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_isnanf", core.dtype("int32")),
}, _builder)
@extern.extern
def signbitf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_signbitf", core.dtype("int32")),
}, _builder)
@extern.extern
def copysign(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_copysignf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_copysign", core.dtype("fp64")),
}, _builder)
@extern.extern
def finitef(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_finitef", core.dtype("int32")),
}, _builder)
@extern.extern
def isinff(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_isinff", core.dtype("int32")),
}, _builder)
@extern.extern
def nextafter(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_nextafterf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_nextafter", core.dtype("fp64")),
}, _builder)
@extern.extern
def sin(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_sinf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_sin", core.dtype("fp64")),
}, _builder)
@extern.extern
def cos(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_cosf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_cos", core.dtype("fp64")),
}, _builder)
@extern.extern
def sinpi(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_sinpif", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_sinpi", core.dtype("fp64")),
}, _builder)
@extern.extern
def cospi(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_cospif", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_cospi", core.dtype("fp64")),
}, _builder)
@extern.extern
def tan(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_tanf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_tan", core.dtype("fp64")),
}, _builder)
@extern.extern
def log2(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_log2f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_log2", core.dtype("fp64")),
}, _builder)
@extern.extern
def exp(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_expf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_exp", core.dtype("fp64")),
}, _builder)
@extern.extern
def exp10(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_exp10f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_exp10", core.dtype("fp64")),
}, _builder)
@extern.extern
def cosh(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_coshf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_cosh", core.dtype("fp64")),
}, _builder)
@extern.extern
def sinh(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_sinhf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_sinh", core.dtype("fp64")),
}, _builder)
@extern.extern
def tanh(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_tanhf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_tanh", core.dtype("fp64")),
}, _builder)
@extern.extern
def atan2(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_atan2f", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_atan2", core.dtype("fp64")),
}, _builder)
@extern.extern
def atan(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_atanf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_atan", core.dtype("fp64")),
}, _builder)
@extern.extern
def asin(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_asinf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_asin", core.dtype("fp64")),
}, _builder)
@extern.extern
def acos(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_acosf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_acos", core.dtype("fp64")),
}, _builder)
@extern.extern
def log(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_logf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_log", core.dtype("fp64")),
}, _builder)
@extern.extern
def log10(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_log10f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_log10", core.dtype("fp64")),
}, _builder)
@extern.extern
def log1p(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_log1pf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_log1p", core.dtype("fp64")),
}, _builder)
@extern.extern
def acosh(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_acoshf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_acosh", core.dtype("fp64")),
}, _builder)
@extern.extern
def asinh(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_asinhf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_asinh", core.dtype("fp64")),
}, _builder)
@extern.extern
def atanh(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_atanhf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_atanh", core.dtype("fp64")),
}, _builder)
@extern.extern
def expm1(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_expm1f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_expm1", core.dtype("fp64")),
}, _builder)
@extern.extern
def hypot(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_hypotf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_hypot", core.dtype("fp64")),
}, _builder)
@extern.extern
def rhypot(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rhypotf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rhypot", core.dtype("fp64")),
}, _builder)
@extern.extern
def norm3d(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm3df", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm3d", core.dtype("fp64")),
}, _builder)
@extern.extern
def rnorm3d(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm3df", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rnorm3d", core.dtype("fp64")),
}, _builder)
@extern.extern
def norm4d(arg0, arg1, arg2, arg3, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, arg3, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm4df", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm4d", core.dtype("fp64")),
}, _builder)
@extern.extern
def rnorm4d(arg0, arg1, arg2, arg3, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, arg3, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm4df", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rnorm4d", core.dtype("fp64")),
}, _builder)
@extern.extern
def cbrt(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_cbrtf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_cbrt", core.dtype("fp64")),
}, _builder)
@extern.extern
def rcbrt(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_rcbrtf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_rcbrt", core.dtype("fp64")),
}, _builder)
@extern.extern
def j0(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_j0f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_j0", core.dtype("fp64")),
}, _builder)
@extern.extern
def j1(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_j1f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_j1", core.dtype("fp64")),
}, _builder)
@extern.extern
def y0(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_y0f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_y0", core.dtype("fp64")),
}, _builder)
@extern.extern
def y1(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_y1f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_y1", core.dtype("fp64")),
}, _builder)
@extern.extern
def yn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("fp32"),): ("__nv_ynf", core.dtype("fp32")),
(core.dtype("int32"), core.dtype("fp64"),): ("__nv_yn", core.dtype("fp64")),
}, _builder)
@extern.extern
def jn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("int32"), core.dtype("fp32"),): ("__nv_jnf", core.dtype("fp32")),
(core.dtype("int32"), core.dtype("fp64"),): ("__nv_jn", core.dtype("fp64")),
}, _builder)
@extern.extern
def cyl_bessel_i0(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_cyl_bessel_i0f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_cyl_bessel_i0", core.dtype("fp64")),
}, _builder)
@extern.extern
def cyl_bessel_i1(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_cyl_bessel_i1f", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_cyl_bessel_i1", core.dtype("fp64")),
}, _builder)
@extern.extern
def erf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_erff", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_erf", core.dtype("fp64")),
}, _builder)
@extern.extern
def erfinv(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_erfinvf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_erfinv", core.dtype("fp64")),
}, _builder)
@extern.extern
def erfc(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_erfcf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_erfc", core.dtype("fp64")),
}, _builder)
@extern.extern
def erfcx(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_erfcxf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_erfcx", core.dtype("fp64")),
}, _builder)
@extern.extern
def erfcinv(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_erfcinvf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_erfcinv", core.dtype("fp64")),
}, _builder)
@extern.extern
def normcdfinv(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_normcdfinvf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_normcdfinv", core.dtype("fp64")),
}, _builder)
@extern.extern
def normcdf(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_normcdff", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_normcdf", core.dtype("fp64")),
}, _builder)
@extern.extern
def lgamma(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_lgammaf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_lgamma", core.dtype("fp64")),
}, _builder)
@extern.extern
def ldexp(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("int32"),): ("__nv_ldexpf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("int32"),): ("__nv_ldexp", core.dtype("fp64")),
}, _builder)
@extern.extern
def scalbn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("int32"),): ("__nv_scalbnf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("int32"),): ("__nv_scalbn", core.dtype("fp64")),
}, _builder)
@extern.extern
def fmod(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmodf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmod", core.dtype("fp64")),
}, _builder)
@extern.extern
def remainder(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_remainderf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_remainder", core.dtype("fp64")),
}, _builder)
@extern.extern
def fma(arg0, arg1, arg2, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ],
{(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma", core.dtype("fp64")),
}, _builder)
@extern.extern
def powi(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("int32"),): ("__nv_powif", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("int32"),): ("__nv_powi", core.dtype("fp64")),
}, _builder)
@extern.extern
def tgamma(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_tgammaf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_tgamma", core.dtype("fp64")),
}, _builder)
@extern.extern
def round(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_roundf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_round", core.dtype("fp64")),
}, _builder)
@extern.extern
def llround(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_llroundf", core.dtype("int64")),
(core.dtype("fp64"),): ("__nv_llround", core.dtype("int64")),
}, _builder)
@extern.extern
def fdim(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdimf", core.dtype("fp32")),
(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fdim", core.dtype("fp64")),
}, _builder)
@extern.extern
def ilogb(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_ilogbf", core.dtype("int32")),
(core.dtype("fp64"),): ("__nv_ilogb", core.dtype("int32")),
}, _builder)
@extern.extern
def logb(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp32"),): ("__nv_logbf", core.dtype("fp32")),
(core.dtype("fp64"),): ("__nv_logb", core.dtype("fp64")),
}, _builder)
@extern.extern
def signbitd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_signbitd", core.dtype("int32")),
}, _builder)
@extern.extern
def isfinited(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_isfinited", core.dtype("int32")),
}, _builder)
@extern.extern
def isinfd(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_isinfd", core.dtype("int32")),
}, _builder)
@extern.extern
def isnand(arg0, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ],
{(core.dtype("fp64"),): ("__nv_isnand", core.dtype("int32")),
}, _builder)
@extern.extern
def dsub_rn(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rn", core.dtype("fp64")),
}, _builder)
@extern.extern
def dsub_rz(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rz", core.dtype("fp64")),
}, _builder)
@extern.extern
def dsub_ru(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_ru", core.dtype("fp64")),
}, _builder)
@extern.extern
def dsub_rd(arg0, arg1, _builder=None):
return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ],
{(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rd", core.dtype("fp64")),
}, _builder)
| triton-master | python/triton/language/libdevice.py |
from __future__ import annotations # remove after python 3.11
from typing import List, Optional, Tuple
from . import core as tl
from triton._C.libtriton.triton import ir
# Create custom exception that prints message "hello"
class IncompatibleTypeErrorimpl(Exception):
def __init__(self, type_a, type_b):
self.type_a = type_a
self.type_b = type_b
self.message = "invalid operands of type " + self.type_a.__repr__() + " and " + self.type_b.__repr__()
super(IncompatibleTypeErrorimpl, self).__init__(self.message)
# ===----------------------------------------------------------------------===//
# Programming Model
# ===----------------------------------------------------------------------===//
def program_id(axis: int, builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_get_program_id(axis), tl.int32)
def num_programs(axis: int, builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_get_num_programs(axis), tl.int32)
# ===----------------------------------------------------------------------===//
# Implicit Casting Utilities
# ===----------------------------------------------------------------------===//
def integer_promote_impl(a_ty: tl.dtype, b_ty: tl.dtype) -> tl.dtype:
a_rank = a_ty.int_bitwidth
b_rank = b_ty.int_bitwidth
a_sn = a_ty.int_signedness
b_sn = b_ty.int_signedness
# Rules for signedness taken from "Usual arithmetic conversions" on
# https://en.cppreference.com/w/c/language/conversion.
if a_sn == b_sn:
return a_ty if a_rank > b_rank else b_ty
elif a_sn == tl.dtype.SIGNEDNESS.UNSIGNED:
return a_ty if a_rank >= b_rank else b_ty
elif b_sn == tl.dtype.SIGNEDNESS.UNSIGNED:
return b_ty if b_rank >= a_rank else a_ty
assert False
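# Worked examples of the promotion rules above (derived directly from this function):
#   int32  with int64  -> int64   (same signedness: the wider rank wins)
#   uint32 with int32  -> uint32  (the unsigned operand's rank is >= the signed one's)
#   uint16 with int32  -> int32   (the signed operand has strictly greater rank)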
def computation_type_impl(a_ty: tl.dtype, b_ty: tl.dtype, div_or_mod: bool) -> tl.dtype:
# 1) if one operand is double, the other is implicitly
# converted to double
if a_ty.is_fp64() or b_ty.is_fp64():
return tl.float64
# 2) if one operand is float, the other is implicitly
# converted to float
if a_ty.is_fp32() or b_ty.is_fp32():
return tl.float32
    # 3) if one operand is half, the other is implicitly converted to half
# unless we're doing / or %, which do not exist natively in PTX for fp16.
# Supported PTX op: add, sub, mul, fma, neg, abs, min, max, tanh, ex2, setp
if a_ty.is_fp16() or b_ty.is_fp16():
if div_or_mod:
return tl.float32
else:
return tl.float16
# 4) return bf16 only if both operands are of bf16
if a_ty.is_bf16() or b_ty.is_bf16():
if div_or_mod:
return tl.float32
if a_ty.is_bf16() and b_ty.is_bf16():
return tl.bfloat16
return tl.float32
if not a_ty.is_int() or not b_ty.is_int():
assert False
    # 5) both operands are integer and undergo
    # integer promotion
if div_or_mod and a_ty.int_signedness != b_ty.int_signedness:
raise ValueError("Cannot use /, #, or % with " + a_ty.__repr__() + " and " + b_ty.__repr__() + " because they have different signedness;"
"this is unlikely to result in a useful answer. Cast them to the same signedness.")
return integer_promote_impl(a_ty, b_ty)
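# Worked examples of the rules above (derived directly from this function):
#   fp16 op fp16    -> fp16, except / and %, which compute in fp32 (rule 3)
#   bf16 op bf16    -> bf16, except / and %, which compute in fp32 (rule 4)
#   bf16 op fp32    -> fp32 (rule 2)
#   int32 op uint32 -> integer promotion (uint32) for most ops, but / and % raise
#                      because the operands differ in signedness (rule 5)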
# ===----------------------------------------------------------------------===//
# Binary Operators
# ===----------------------------------------------------------------------===//
def check_ptr_type_impl(type_a: tl.dtype, type_b: tl.dtype, allow_ptr_a: bool) -> None:
if type_a.is_ptr():
if not allow_ptr_a:
raise IncompatibleTypeErrorimpl(type_a, type_b)
# T* + U* with T != U
if type_b.is_ptr() and (type_a != type_b):
raise IncompatibleTypeErrorimpl(type_a, type_b)
# T* + float
if type_b.is_floating():
raise IncompatibleTypeErrorimpl(type_a, type_b)
def binary_op_type_checking_impl(lhs: tl.tensor,
rhs: tl.tensor,
builder: ir.builder,
allow_lhs_ptr=False, allow_rhs_ptr=False,
arithmetic_check=True, div_or_mod=False
) -> Tuple[tl.tensor, tl.tensor]:
# implicit broadcasting
lhs, rhs = broadcast_impl_value(lhs, rhs, builder)
# implicit typecasting
lhs_sca_ty = lhs.type.scalar
rhs_sca_ty = rhs.type.scalar
check_ptr_type_impl(lhs_sca_ty, rhs_sca_ty, allow_lhs_ptr)
check_ptr_type_impl(rhs_sca_ty, lhs_sca_ty, allow_rhs_ptr)
if arithmetic_check and not lhs_sca_ty.is_ptr() and not rhs_sca_ty.is_ptr():
ret_sca_ty = computation_type_impl(lhs_sca_ty, rhs_sca_ty, div_or_mod)
lhs = cast(lhs, ret_sca_ty, builder)
rhs = cast(rhs, ret_sca_ty, builder)
return lhs, rhs
def add(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder, True, True)
input_scalar_ty = input.type.scalar
other_scalar_ty = other.type.scalar
# offset + ptr
# ptr + offset
if other_scalar_ty.is_ptr() and not input_scalar_ty.is_ptr():
input, other = other, input
if input_scalar_ty.is_ptr():
return tl.tensor(builder.create_gep(input.handle, [other.handle]), input.type)
# float + float
elif input_scalar_ty.is_floating():
return tl.tensor(builder.create_fadd(input.handle, other.handle), input.type)
# int + int
elif input_scalar_ty.is_int():
return tl.tensor(builder.create_add(input.handle, other.handle), input.type)
assert False
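# Note on the pointer case above: for a pointer-typed tensor `ptr` and an integer `offs`,
# both `ptr + offs` and `offs + ptr` are canonicalized to pointer + offset and lowered to
# a GEP, so the result keeps the pointer type.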
def sub(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder, True, False)
scalar_ty = input.type.scalar
# ptr - offset
if scalar_ty.is_ptr():
return tl.tensor(builder.create_gep(input.handle, [minus(other, builder).handle]),
input.type)
# float - float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fsub(input.handle, other.handle), input.type)
# int - int
elif scalar_ty.is_int():
return tl.tensor(builder.create_sub(input.handle, other.handle), input.type)
assert False
def mul(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder)
scalar_ty = input.type.scalar
# float * float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fmul(input.handle, other.handle), input.type)
    # int * int
elif scalar_ty.is_int():
return tl.tensor(builder.create_mul(input.handle, other.handle), input.type)
assert False
def truediv(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True)
input_scalar_ty = input.type.scalar
other_scalar_ty = other.type.scalar
# float / int
if input_scalar_ty.is_floating() and other_scalar_ty.is_int():
other = cast(other, input_scalar_ty, builder)
# int / float
elif input_scalar_ty.is_int() and other_scalar_ty.is_floating():
input = cast(input, other_scalar_ty, builder)
# int / int (cast to tl.float32)
elif input_scalar_ty.is_int() and other_scalar_ty.is_int():
input = cast(input, tl.float32, builder)
other = cast(other, tl.float32, builder)
    # float / float (cast to the type with the wider mantissa)
elif input_scalar_ty.is_floating() and other_scalar_ty.is_floating():
if input_scalar_ty.fp_mantissa_width > other_scalar_ty.fp_mantissa_width:
other = cast(other, input_scalar_ty, builder)
else:
input = cast(input, other_scalar_ty, builder)
# unreachable
else:
assert False
return tl.tensor(builder.create_fdiv(input.handle, other.handle), input.type)
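# Note: `/` always produces a floating-point result; in particular int / int casts both
# operands to fp32 before the fdiv, and mixed float widths compute in the operand type
# with the wider mantissa.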
def floordiv(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True)
input_scalar_ty = input.type.scalar
other_scalar_ty = other.type.scalar
if input_scalar_ty.is_int() and other_scalar_ty.is_int():
ret_ty = integer_promote_impl(input_scalar_ty, other_scalar_ty)
input = cast(input, ret_ty, builder)
other = cast(other, ret_ty, builder)
if ret_ty.is_int_signed():
return tl.tensor(builder.create_sdiv(input.handle, other.handle), input.type)
else:
return tl.tensor(builder.create_udiv(input.handle, other.handle), input.type)
assert False
def fdiv(input: tl.tensor,
other: tl.tensor,
ieee_rounding: bool,
builder: ir.builder) -> tl.tensor:
input_scalar_ty = input.type.scalar
other_scalar_ty = other.type.scalar
if not input_scalar_ty.is_floating() or not other_scalar_ty.is_floating():
raise ValueError("both operands of fdiv must have floating poscalar type")
input, other = binary_op_type_checking_impl(input, other, builder, False, False, False, True)
ret = builder.create_fdiv(input.handle, other.handle)
ret.set_fdiv_ieee_rounding(ieee_rounding)
return tl.tensor(ret, input.type)
def mod(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder, False, False, True, True)
scalar_ty = input.type.scalar
other_scalar_ty = other.type.scalar
# float % float
if scalar_ty.is_floating():
return tl.tensor(builder.create_frem(input.handle, other.handle), input.type)
    # int % int
elif scalar_ty.is_int():
if scalar_ty.int_signedness != other_scalar_ty.int_signedness:
raise ValueError("Cannot mod " + scalar_ty.__repr__() + " by " + other_scalar_ty.__repr__() + " "
"because they have different signedness;"
"this is unlikely to result in a useful answer. Cast them to the same signedness.")
if scalar_ty.is_int_signed():
return tl.tensor(builder.create_srem(input.handle, other.handle), input.type)
else:
return tl.tensor(builder.create_urem(input.handle, other.handle), input.type)
assert False
##############
# bitwise ops
##############
def bitwise_op_type_checking_impl(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]:
input, other = binary_op_type_checking_impl(input, other, builder, False, False, False)
input_sca_ty = input.type.scalar
other_sca_ty = other.type.scalar
if not input_sca_ty.is_int() or not other_sca_ty.is_int():
raise IncompatibleTypeErrorimpl(input_sca_ty, other_sca_ty)
ret_sca_ty = integer_promote_impl(input_sca_ty, other_sca_ty)
if ret_sca_ty != input_sca_ty:
input = cast(input, ret_sca_ty, builder)
if ret_sca_ty != other_sca_ty:
other = cast(other, ret_sca_ty, builder)
return input, other
def and_(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = bitwise_op_type_checking_impl(input, other, builder)
return tl.tensor(builder.create_and(input.handle, other.handle), input.type)
def or_(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = bitwise_op_type_checking_impl(input, other, builder)
return tl.tensor(builder.create_or(input.handle, other.handle), input.type)
def xor_(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = bitwise_op_type_checking_impl(input, other, builder)
return tl.tensor(builder.create_xor(input.handle, other.handle), input.type)
def lshr(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = bitwise_op_type_checking_impl(input, other, builder)
return tl.tensor(builder.create_lshr(input.handle, other.handle), input.type)
def shl(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = bitwise_op_type_checking_impl(input, other, builder)
return tl.tensor(builder.create_shl(input.handle, other.handle), input.type)
# ===----------------------------------------------------------------------===//
# Unary Operators
# ===----------------------------------------------------------------------===//
def plus(input: tl.tensor) -> tl.tensor:
return input
def minus(input: tl.tensor,
builder: ir.builder) -> tl.tensor:
input_sca_ty = input.type.scalar
if input_sca_ty.is_ptr():
raise ValueError("wrong type argument to unary minus (" + input_sca_ty.__repr__() + ")")
_0 = tl.tensor(ir.constant.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty)
return sub(_0, input, builder)
def invert(input: tl.tensor,
builder: ir.builder) -> tl.tensor:
input_sca_ty = input.type.scalar
if input_sca_ty.is_ptr() or input_sca_ty.is_floating():
raise ValueError("wrong type argument to unary invert (" + input_sca_ty.__repr__() + ")")
_1 = tl.tensor(ir.constant.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty)
return xor_(input, _1, builder)
# ===----------------------------------------------------------------------===//
# Comparison Operators
# ===----------------------------------------------------------------------===//
def _bool_like(v: tl.tensor) -> tl.block_type:
if not v.type.is_block():
return tl.int1
shape = v.type.shape
return tl.block_type(tl.int1, shape)
def greater_than(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder)
scalar_ty = input.type.scalar
# float > float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fcmpOGT(input.handle, other.handle), _bool_like(input))
# > int
elif scalar_ty.is_int():
if scalar_ty.is_int_signed():
return tl.tensor(builder.create_icmpSGT(input.handle, other.handle), _bool_like(input))
else:
return tl.tensor(builder.create_icmpUGT(input.handle, other.handle), _bool_like(input))
assert False
def greater_equal(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder)
scalar_ty = input.type.scalar
# float >= float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fcmpOGE(input.handle, other.handle), _bool_like(input))
# >= int
elif scalar_ty.is_int():
if scalar_ty.is_int_signed():
return tl.tensor(builder.create_icmpSGE(input.handle, other.handle), _bool_like(input))
else:
return tl.tensor(builder.create_icmpUGE(input.handle, other.handle), _bool_like(input))
assert False
def less_than(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder)
scalar_ty = input.type.scalar
# float < float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fcmpOLT(input.handle, other.handle), _bool_like(input))
# < int
elif scalar_ty.is_int():
if scalar_ty.is_int_signed():
return tl.tensor(builder.create_icmpSLT(input.handle, other.handle), _bool_like(input))
else:
return tl.tensor(builder.create_icmpULT(input.handle, other.handle), _bool_like(input))
assert False
def less_equal(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder)
scalar_ty = input.type.scalar
# float <= float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fcmpOLE(input.handle, other.handle), _bool_like(input))
# <= int
elif scalar_ty.is_int():
if scalar_ty.is_int_signed():
return tl.tensor(builder.create_icmpSLE(input.handle, other.handle), _bool_like(input))
else:
return tl.tensor(builder.create_icmpULE(input.handle, other.handle), _bool_like(input))
assert False
def equal(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder)
scalar_ty = input.type.scalar
# float == float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fcmpOEQ(input.handle, other.handle), _bool_like(input))
# == int
elif scalar_ty.is_int():
return tl.tensor(builder.create_icmpEQ(input.handle, other.handle), _bool_like(input))
assert False
def not_equal(input: tl.tensor,
other: tl.tensor,
builder: ir.builder) -> tl.tensor:
input, other = binary_op_type_checking_impl(input, other, builder)
scalar_ty = input.type.scalar
# float != float
if scalar_ty.is_floating():
return tl.tensor(builder.create_fcmpUNE(input.handle, other.handle), _bool_like(input))
# != int
elif scalar_ty.is_int():
return tl.tensor(builder.create_icmpNE(input.handle, other.handle), _bool_like(input))
assert False
# ===----------------------------------------------------------------------===//
# Block Creation
# ===----------------------------------------------------------------------===//
def arange(start: int, end: int, builder: ir.builder) -> tl.tensor:
if not isinstance(start, int) or not isinstance(end, int):
raise ValueError("arange's arguments must be of type tl.constexpr")
shape = [end - start]
ret_ty = tl.block_type(tl.int32, shape)
return tl.tensor(builder.get_range(start, end), ret_ty)
def zeros(shape: List[int], dtype: tl.dtype, builder: ir.builder) -> tl.tensor:
_0 = ir.constant.get_null_value(dtype.to_ir(builder))
ret_ty = tl.block_type(dtype, shape)
return tl.tensor(builder.create_splat(_0, shape), ret_ty)
# ===----------------------------------------------------------------------===//
# Shape Manipulation
# ===----------------------------------------------------------------------===//
def reshape(input: tl.tensor,
dst_shape: List[int],
builder: ir.builder) -> tl.tensor:
numel = 1
for s in dst_shape:
numel *= s
if input.type.numel != numel:
raise ValueError("cannot reshape block of different shape")
ret_ty = tl.block_type(input.type.scalar, dst_shape)
return tl.tensor(builder.create_reshape(input.handle, dst_shape), ret_ty)
def cat(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder) -> tl.tensor:
assert lhs.type.is_block() and rhs.type.is_block()
assert lhs.type.shape[1:] == rhs.type.shape[1:]
ret_shape = [lhs.type.shape[0] + rhs.type.shape[0]]
ret_ty = tl.block_type(lhs.type.scalar, ret_shape)
return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), ret_ty)
def broadcast_impl_shape(input: tl.tensor,
shape: List[int],
builder: ir.builder) -> tl.tensor:
if not input.type.is_block():
ret_ty = tl.block_type(input.type, shape)
return tl.tensor(builder.create_splat(input.handle, shape), ret_ty)
src_shape = input.type.get_block_shapes()
if len(src_shape) != len(shape):
raise ValueError(f"Cannot broadcast, rank mismatch: {src_shape}, {shape}")
if shape == src_shape:
return input
for i in range(len(src_shape)):
if shape[i] != src_shape[i] and src_shape[i] != 1:
raise ValueError(f"Cannot broadcast, the expanded size of the tensor ({shape[i]})"
f" must match the existing size ({src_shape[1]}) at non-singleton dimension"
f" {i}: {src_shape}, {shape}")
ret_ty = tl.block_type(input.type.scalar, shape)
return tl.tensor(builder.create_broadcast(input.handle, shape), ret_ty)
def broadcast_impl_value(lhs: tl.tensor,
rhs: tl.tensor,
builder: ir.builder) -> Tuple[tl.tensor, tl.tensor]:
lhs_ty = lhs.type
rhs_ty = rhs.type
# make_shape_compatible(block, scalar)
if lhs_ty.is_block() and not rhs_ty.is_block():
rhs_ty = tl.block_type(rhs_ty.scalar, lhs_ty.shape)
rhs = tl.tensor(builder.create_splat(rhs.handle, lhs_ty.get_block_shapes()), rhs_ty)
# make_shape_compatible(scalar, block)
elif not lhs_ty.is_block() and rhs_ty.is_block():
lhs_ty = tl.block_type(lhs_ty.scalar, rhs_ty.shape)
lhs = tl.tensor(builder.create_splat(lhs.handle, rhs_ty.get_block_shapes()), lhs_ty)
# make_shape_compatible(block, block)
elif lhs_ty.is_block() and rhs_ty.is_block():
lhs_shape = lhs_ty.get_block_shapes()
rhs_shape = rhs_ty.get_block_shapes()
if len(lhs_shape) != len(rhs_shape):
raise ValueError("Cannot make_shape_compatible: blocks must have the same rank")
ret_shape = []
for i in range(len(lhs_shape)):
left = lhs_shape[i]
right = rhs_shape[i]
if left == 1:
ret_shape.append(right)
elif right == 1:
ret_shape.append(left)
elif left == right:
ret_shape.append(left)
else:
raise ValueError("Cannot make_shape_compatible: incompatible dimensions "
"at index " + str(i) + ": " + str(left) + " and " + str(right))
if lhs_shape != ret_shape:
ret_ty = tl.block_type(lhs_ty.scalar, ret_shape)
lhs = tl.tensor(builder.create_broadcast(lhs.handle, ret_shape), ret_ty)
if rhs_shape != ret_shape:
ret_ty = tl.block_type(rhs_ty.scalar, ret_shape)
rhs = tl.tensor(builder.create_broadcast(rhs.handle, ret_shape), ret_ty)
# (scalar, scalar) => returns original blocks
return lhs, rhs
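# Added illustrative note (not part of the original file): how the two broadcast
# helpers above behave on a few shapes. Ranks must already match and only
# size-1 dimensions are expanded; scalars are splatted to the block shape.
#
#   broadcast_impl_shape(<[128, 1], fp32>, [128, 64])        -> <[128, 64], fp32>
#   broadcast_impl_value(<[128, 1], fp32>, <[1, 64], fp32>)  -> (<[128, 64], fp32>, <[128, 64], fp32>)
#   broadcast_impl_value(fp32 scalar,      <[64], fp32>)     -> (splatted <[64], fp32>, <[64], fp32>)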
#######
# cast
#######
def bitcast(input: tl.tensor,
dst_ty: tl.dtype,
builder: ir.builder) -> tl.tensor:
src_ty = input.type
if src_ty.is_block():
dst_ty = tl.block_type(dst_ty, input.type.get_block_shapes())
if src_ty == dst_ty:
return input
src_sca_ty = src_ty.scalar
dst_sca_ty = dst_ty.scalar
if src_sca_ty.is_ptr() or dst_sca_ty.is_ptr():
return cast(input, dst_ty, builder)
# Bitcast
src_bits = src_sca_ty.primitive_bitwidth
dst_bits = dst_sca_ty.primitive_bitwidth
if src_bits != dst_bits:
raise ValueError("Cannot bitcast data-type of size " + str(src_bits) + "to "
"data-type of size " + str(dst_bits))
return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)),
dst_ty)
def cast(input: tl.tensor,
dst_ty: tl.dtype,
builder: ir.builder) -> tl.tensor:
src_ty = input.type
if src_ty.is_block() and not dst_ty.is_block():
dst_ty = tl.block_type(dst_ty, input.type.get_block_shapes())
if src_ty == dst_ty:
return input
src_sca_ty = src_ty.scalar
dst_sca_ty = dst_ty.scalar
# bf16 <=> (not fp32)
if (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()) or \
(dst_sca_ty.is_bf16() and not src_sca_ty.is_fp32()):
return cast(cast(input, tl.float32, builder), dst_sca_ty, builder)
# FP Truncation
truncate_fp = src_sca_ty.is_floating() and \
dst_sca_ty.is_floating() and \
src_sca_ty.fp_mantissa_width > dst_sca_ty.fp_mantissa_width
if truncate_fp:
return tl.tensor(builder.create_fp_trunc(input.handle,
dst_ty.to_ir(builder)),
dst_ty)
# FP Extension
ext_fp = src_sca_ty.is_floating() and \
dst_sca_ty.is_floating() and \
src_sca_ty.fp_mantissa_width < dst_sca_ty.fp_mantissa_width
if ext_fp:
return tl.tensor(builder.create_fp_ext(input.handle,
dst_ty.to_ir(builder)),
dst_ty)
# Int cast
if src_sca_ty.is_int() and dst_sca_ty.is_int() and \
(src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness):
sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool()
return tl.tensor(builder.create_int_cast(input.handle,
dst_ty.to_ir(builder), sign_extend),
dst_ty)
# Float to Int
if src_sca_ty.is_floating() and dst_sca_ty.is_int():
# TODO: is this correct?
if dst_sca_ty.is_bool():
return not_equal(input, tl._to_tensor(0, builder), builder)
else:
return tl.tensor(builder.create_fp_to_si(input.handle,
dst_ty.to_ir(builder)),
dst_ty)
# int => float
if src_sca_ty.is_int() and dst_sca_ty.is_floating():
if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed():
return tl.tensor(builder.create_ui_to_fp(input.handle,
dst_ty.to_ir(builder)),
dst_ty)
else:
return tl.tensor(builder.create_si_to_fp(input.handle,
dst_ty.to_ir(builder)),
dst_ty)
# ptr => int
if src_sca_ty.is_ptr() and dst_sca_ty.is_int():
bitwidth = dst_sca_ty.int_bitwidth
if bitwidth == 64:
return tl.tensor(builder.create_ptr_to_int(input.handle, dst_ty.to_ir(builder)),
dst_ty)
if bitwidth == 1:
return not_equal(cast(input, tl.int64, builder),
tl.tensor(builder.get_int64(0), tl.int64),
builder)
if not src_sca_ty.is_ptr() and dst_sca_ty.is_ptr():
return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty)
# ptr => ptr
if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr():
return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty)
# * => bool
if dst_sca_ty.is_bool():
if src_sca_ty.is_ptr():
input = cast(input, tl.int64, builder)
other = builder.get_int64(0)
if src_ty.is_bool():
other = builder.create_splat(other, src_ty.get_block_shapes())
return tl.tensor(builder.create_icmpNE(input.handle, other), dst_ty)
assert False, f'cannot cast {input} to {dst_ty}'
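# Added illustrative note (not part of the original file): a few of the
# conversion paths implemented by `cast` above, for a scalar value `x`:
#   fp32   -> fp16  : FP truncation (mantissa narrows)
#   fp16   -> fp32  : FP extension (mantissa widens)
#   bf16   -> fp16  : goes through fp32 first (see the bf16 special case)
#   int32  -> int64 : integer cast with sign extension
#   uint32 -> fp32  : unsigned-to-float conversion
#   ptr    -> int1  : the 64-bit address is compared against 0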
# ===----------------------------------------------------------------------===//
# Memory Operators
# ===----------------------------------------------------------------------===//
def _parse_eviction_policy(eviction_policy):
eviction = ir.EVICTION_POLICY.NORMAL # default
if eviction_policy:
if eviction_policy == "evict_last":
eviction = ir.EVICTION_POLICY.EVICT_LAST
elif eviction_policy == "evict_first":
eviction = ir.EVICTION_POLICY.EVICT_FIRST
else:
raise ValueError(f"Eviction policy {eviction_policy} not supported")
return eviction
def load(ptr: tl.tensor,
mask: Optional[tl.tensor],
other: Optional[tl.tensor],
cache_modifier: str,
eviction_policy: str,
is_volatile: bool,
builder: ir.builder) -> tl.tensor:
if not ptr.type.scalar.is_ptr():
raise ValueError("Pointer argument of load instruction is " + ptr.type.__repr__())
if ptr.type.is_block():
if mask:
mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
if other:
other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder)
if other:
other = cast(other, ptr.type.scalar.element_ty, builder)
ptr_ty = ptr.type.scalar
elt_ty = ptr_ty.element_ty
# treat bool* as tl.int8*
if elt_ty == tl.int1:
elt_ty = tl.int8
ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space)
ptr = cast(ptr, ptr_ty, builder)
# cache modifier
cache = ir.CACHE_MODIFIER.NONE # default
if cache_modifier:
if cache_modifier == ".ca":
cache = ir.CACHE_MODIFIER.CA
elif cache_modifier == ".cg":
cache = ir.CACHE_MODIFIER.CG
else:
raise ValueError(f"Cache modifier {cache_modifier} not supported")
# eviction policy
eviction = _parse_eviction_policy(eviction_policy)
if ptr.type.is_block():
shape = ptr.type.get_block_shapes()
dst_ty = tl.block_type(elt_ty, shape)
else:
dst_ty = elt_ty
if not mask and not other:
return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile),
dst_ty)
if not mask:
raise ValueError("`other` cannot be provided without `mask`")
if not other:
other_ir = ir.undef.get(elt_ty.to_ir(builder))
if ptr.type.is_block():
other_ir = builder.create_splat(other_ir, ptr.type.get_block_shapes())
other = tl.tensor(other_ir, dst_ty)
return tl.tensor(builder.create_masked_load(ptr.handle,
mask.handle,
other.handle,
cache, eviction, is_volatile),
dst_ty)
def store(ptr: tl.tensor,
val: tl.tensor,
mask: Optional[tl.tensor],
eviction_policy: str,
builder: ir.builder) -> tl.tensor:
if not ptr.type.scalar.is_ptr():
raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__())
if ptr.type.is_block():
val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder)
if mask:
mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
ptr_ty = ptr.type.scalar
elt_ty = ptr_ty.element_ty
# treat bool* as tl.int8*
if elt_ty == tl.int1:
elt_ty_ptr = tl.int8
ptr_ty = tl.pointer_type(elt_ty_ptr, ptr_ty.address_space)
ptr = cast(ptr, ptr_ty, builder)
# eviction policy
eviction = _parse_eviction_policy(eviction_policy)
# cast to target data-type
val = cast(val, elt_ty, builder)
if not mask:
return tl.tensor(builder.create_store(ptr.handle, val.handle, eviction), tl.void)
if not mask.type.scalar.is_bool():
raise ValueError("Mask must have boolean scalar type")
return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle, eviction), tl.void)
#########
# atomic
#########
def atomic_cas(ptr: tl.tensor,
cmp: tl.tensor,
val: tl.tensor,
builder: ir.builder) -> tl.tensor:
element_ty = ptr.type.scalar.element_ty
if element_ty.primitive_bitwidth not in [16, 32, 64]:
raise ValueError("atomic_cas only supports elements with width {16, 32, 64}")
return tl.tensor(builder.create_atomic_cas(ptr.handle, cmp.handle, val.handle), val.type)
def atom_red_typechecking_impl(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
op: str,
builder: ir.builder) -> Tuple[tl.tensor, tl.tensor, tl.tensor]:
if not ptr.type.scalar.is_ptr():
raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__())
element_ty = ptr.type.scalar.element_ty
if element_ty is tl.float16 and op != 'add':
raise ValueError("atomic_" + op + " does not support fp16")
if element_ty in [tl.int1, tl.int8, tl.int16, tl.bfloat16]:
raise ValueError("atomic_" + op + " does not support " + element_ty)
if ptr.type.is_block():
if mask:
mask = broadcast_impl_shape(mask, ptr.type.get_block_shapes(), builder)
if val:
val = broadcast_impl_shape(val, ptr.type.get_block_shapes(), builder)
val = cast(val, ptr.type.scalar.element_ty, builder)
if not mask:
mask_ir = builder.get_int1(True)
mask_ty = tl.int1
if ptr.type.is_block():
mask_ir = builder.create_splat(mask_ir, ptr.type.get_block_shapes())
mask_ty = tl.block_type(tl.int1, ptr.type.get_block_shapes())
mask = tl.tensor(mask_ir, mask_ty)
return ptr, val, mask
def atomic_max(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'max', builder)
sca_ty = val.type.scalar
# direct call to atomic_max for integers
if sca_ty.is_int():
if sca_ty.is_int_signed():
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX,
ptr.handle,
val.handle,
mask.handle),
val.type)
else:
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX,
ptr.handle,
val.handle,
mask.handle),
val.type)
# for float
# return atomic_smax(i_ptr, i_val) if val >= 0
# return atomic_umin(i_ptr, i_val) if val < 0
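# (added note) this works because, for IEEE-754 floats, the bit pattern of
# non-negative values is monotonically increasing when read as a signed int,
# while the bit pattern of negative values is monotonically decreasing when
# read as an unsigned int, so smax/umin on the reinterpreted bits pick the
# float maximum.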
i_val = bitcast(val, tl.int32, builder)
i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder)
pos = greater_equal(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder)
neg = less_than(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder)
pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle, and_(mask, pos, builder).handle), i_val.type)
neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle, and_(mask, neg, builder).handle), i_val.type)
return where(pos, pos_ret, neg_ret, builder)
def atomic_min(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'min', builder)
sca_ty = val.type.scalar
# direct call to atomic_min for integers
if sca_ty.is_int():
if sca_ty.is_int_signed():
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MIN,
ptr.handle,
val.handle,
mask.handle),
val.type)
else:
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN,
ptr.handle,
val.handle,
mask.handle),
val.type)
# for float
# return atomic_smin(i_ptr, i_val) if val >= 0
# return atomic_umax(i_ptr, i_val) if val < 0
i_val = bitcast(val, tl.int32, builder)
i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder)
pos = greater_equal(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder)
neg = less_than(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder)
pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MIN,
i_ptr.handle,
i_val.handle,
and_(mask, pos, builder).handle),
i_val.type)
neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMAX,
i_ptr.handle,
i_val.handle,
and_(mask, neg, builder).handle),
i_val.type)
return where(pos, pos_ret, neg_ret, builder)
def atomic_add(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'add', builder)
sca_ty = val.type.scalar
op = ir.ATOMIC_OP.FADD if sca_ty.is_floating() else ir.ATOMIC_OP.ADD
return tl.tensor(builder.create_atomic_rmw(op, ptr.handle, val.handle, mask.handle), val.type)
def atomic_and(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'and', builder)
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.AND, ptr.handle, val.handle, mask.handle), val.type)
def atomic_or(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'or', builder)
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.OR, ptr.handle, val.handle, mask.handle), val.type)
def atomic_xor(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xor', builder)
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XOR, ptr.handle, val.handle, mask.handle), val.type)
def atomic_xchg(ptr: tl.tensor,
val: tl.tensor,
mask: tl.tensor,
builder: ir.builder) -> tl.tensor:
ptr, val, mask = atom_red_typechecking_impl(ptr, val, mask, 'xchg', builder)
return tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.XCHG, ptr.handle, val.handle, mask.handle), val.type)
# ===----------------------------------------------------------------------===//
# Linear Algebra
# ===----------------------------------------------------------------------===//
def dot(a: tl.tensor,
b: tl.tensor,
trans_a: bool,
trans_b: bool,
allow_tf32: bool,
builder: ir.builder) -> tl.tensor:
in_a = 1 if not trans_a else 0
in_b = 1 if trans_b else 0
assert a.type.is_block() and b.type.is_block()
assert len(a.shape) == 2 and len(b.shape) == 2
assert a.shape[in_a] == b.shape[in_b]
assert a.shape[0] >= 16 and a.shape[1] >= 16 and b.shape[1] >= 16,\
"small blocks not supported!"
if a.type.scalar.is_int():
_0 = builder.get_int32(0)
ret_scalar_ty = tl.int32
else:
_0 = builder.get_float32(0)
ret_scalar_ty = tl.float32
M = a.type.shape[in_a ^ 1]
N = b.type.shape[in_b ^ 1]
_0 = builder.create_splat(_0, [M, N])
ret_ty = tl.block_type(ret_scalar_ty, [M, N])
ret = builder.create_dot(a.handle, b.handle, _0, trans_a, trans_b, allow_tf32)
return tl.tensor(ret, ret_ty)
# ===----------------------------------------------------------------------===//
# Indexing
# ===----------------------------------------------------------------------===//
def where(condition: tl.tensor,
x: tl.tensor,
y: tl.tensor,
builder: ir.builder) -> tl.tensor:
condition = cast(condition, tl.int1, builder)
if condition.type.is_block():
x = broadcast_impl_shape(x, condition.type.get_block_shapes(), builder)
y = broadcast_impl_shape(y, condition.type.get_block_shapes(), builder)
x, y = binary_op_type_checking_impl(x, y, builder, True, True)
ret_ty = x.type
return tl.tensor(builder.create_select(condition.handle, x.handle, y.handle), ret_ty)
# ===----------------------------------------------------------------------===//
# Reductions
# ===----------------------------------------------------------------------===
def reduce_impl(input: tl.tensor, axis: int, builder: ir.builder, name: str,
FLOAT_OP: ir.REDUCE_OP, INT_OP: ir.REDUCE_OP) -> tl.tensor:
scalar_ty = input.type.scalar
# input is extended to 32-bits if necessary
# this increases numerical accuracy and can be done pretty much for free
# on GPUs
if scalar_ty.is_int() and scalar_ty.int_bitwidth <= 32:
input = cast(input, tl.int32, builder)
# hardware doesn't support FMAX, FMIN, CMP for bfloat16
if scalar_ty is tl.bfloat16:
input = cast(input, tl.float32, builder)
# choose the right unsigned operation
if scalar_ty.is_int_unsigned():
int_op_to_unit = {
ir.REDUCE_OP.MIN: ir.REDUCE_OP.UMIN,
ir.REDUCE_OP.MAX: ir.REDUCE_OP.UMAX,
ir.REDUCE_OP.ARGMIN: ir.REDUCE_OP.ARGUMIN,
ir.REDUCE_OP.ARGMAX: ir.REDUCE_OP.ARGUMAX,
}
if INT_OP in int_op_to_unit:
INT_OP = int_op_to_unit[INT_OP]
# get result type
shape = input.type.shape
ret_shape = []
for i, s in enumerate(shape):
if i != axis:
ret_shape.append(s)
if len(ret_shape) == 0:
res_ty = scalar_ty
else:
res_ty = tl.block_type(scalar_ty, ret_shape)
if scalar_ty.is_floating():
return tl.tensor(builder.create_reduce(input.handle, FLOAT_OP, axis), res_ty)
elif scalar_ty.is_int():
return tl.tensor(builder.create_reduce(input.handle, INT_OP, axis), res_ty)
assert False
def min(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
return reduce_impl(input, axis, builder, "min", ir.REDUCE_OP.FMIN, ir.REDUCE_OP.MIN)
def argmin(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
return reduce_impl(input, axis, builder, "argmin", ir.REDUCE_OP.ARGFMIN, ir.REDUCE_OP.ARGMIN)
def max(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
return reduce_impl(input, axis, builder, "max", ir.REDUCE_OP.FMAX, ir.REDUCE_OP.MAX)
def argmax(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
return reduce_impl(input, axis, builder, "argmax", ir.REDUCE_OP.ARGFMAX, ir.REDUCE_OP.ARGMAX)
def sum(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
return reduce_impl(input, axis, builder, "sum", ir.REDUCE_OP.FADD, ir.REDUCE_OP.ADD)
def xor_sum(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor:
scalar_ty = input.type.scalar
if not scalar_ty.is_int():
raise ValueError("xor_sum only supported for integers")
return reduce_impl(input, axis, builder, "sum", ir.REDUCE_OP.XOR, ir.REDUCE_OP.XOR)
# -----------------------
# Utilities
# -----------------------
def clock(builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_clock(), tl.int64)
def globaltimer(builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_globaltimer(), tl.int64)
# ===----------------------------------------------------------------------===
# Math
# ===----------------------------------------------------------------------===
def umulhi(x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor:
x, y = binary_op_type_checking_impl(x, y, builder)
return tl.tensor(builder.create_umulhi(x.handle, y.handle), x.type)
def exp(x: tl.tensor, builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_exp(x.handle), x.type)
def log(x: tl.tensor, builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_log(x.handle), x.type)
def cos(x: tl.tensor, builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_cos(x.handle), x.type)
def sin(x: tl.tensor, builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_sin(x.handle), x.type)
def sqrt(x: tl.tensor, builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_sqrt(x.handle), x.type)
##
def multiple_of(x: tl.tensor, values: List[int]) -> tl.tensor:
if len(x.shape) != len(values):
raise ValueError("Shape of input to multiple_of does not match the length of values")
x.handle.multiple_of(values)
return x
def max_contiguous(x: tl.tensor, values: List[int]) -> tl.tensor:
if len(x.shape) != len(values):
raise ValueError("Shape of input to max_contiguous does not match the length of values")
x.handle.max_contiguous(values)
return x
def debug_barrier(builder: ir.builder) -> tl.tensor:
return tl.tensor(builder.create_barrier(''), tl.void)
| triton-master | python/triton/language/semantic.py |
# flake8: noqa: F401
from . import core, extern, libdevice, random
from .core import *
from .random import *
| triton-master | python/triton/language/__init__.py |
from __future__ import annotations
from enum import Enum
from functools import wraps
from typing import List
import triton
from . import semantic
from triton._C.libtriton.triton import ir
def _to_tensor(x, builder):
if isinstance(x, bool):
return tensor(builder.get_int1(x), int1)
# Note: compile-time const integers are represented by unsigned values
elif isinstance(x, int):
if -2**31 <= x < 2**31:
return tensor(builder.get_int32(x), int32)
elif 2**31 <= x < 2**32:
return tensor(builder.get_uint32(x), uint32)
elif -2**63 <= x < 2**63:
return tensor(builder.get_int64(x), int64)
elif 2**63 <= x < 2**64:
return tensor(builder.get_uint64(x), uint64)
else:
raise RuntimeError(f'Nonrepresentable integer {x}.')
elif isinstance(x, float):
return tensor(builder.get_float32(x), float32)
elif isinstance(x, constexpr):
if x.value is None:
return None
return _to_tensor(x.value, builder)
elif isinstance(x, tensor):
return x
elif x is None:
return None
assert False, f'cannot convert {x} to tensor'
def builtin(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if '_builder' not in kwargs or \
kwargs['_builder'] is None:
raise ValueError("Did you forget to add @triton.jit ? (`_builder` argument must be provided outside of JIT functions.)")
return fn(*args, **kwargs)
return wrapper
class dtype:
SINT_TYPES = ['int1', 'int8', 'int16', 'int32', 'int64']
UINT_TYPES = ['uint8', 'uint16', 'uint32', 'uint64']
FP_TYPES = ['fp8', 'fp16', 'bf16', 'fp32', 'fp64']
OTHER_TYPES = ['void']
class SIGNEDNESS(Enum):
SIGNED = 0
UNSIGNED = 1
def __init__(self, name):
self.name = name
assert name in dtype.SINT_TYPES + dtype.UINT_TYPES + dtype.FP_TYPES + dtype.OTHER_TYPES, name
if name in dtype.SINT_TYPES:
self.int_signedness = dtype.SIGNEDNESS.SIGNED
self.int_bitwidth = int(name.split('int')[-1])
self.primitive_bitwidth = self.int_bitwidth
elif name in dtype.UINT_TYPES:
self.int_signedness = dtype.SIGNEDNESS.UNSIGNED
self.int_bitwidth = int(name.split('int')[-1])
self.primitive_bitwidth = self.int_bitwidth
elif name in dtype.FP_TYPES:
if name == 'fp8':
self.fp_mantissa_width = 3
self.primitive_bitwidth = 8
elif name == 'fp16':
self.fp_mantissa_width = 10
self.primitive_bitwidth = 16
elif name == 'bf16':
self.fp_mantissa_width = 7
self.primitive_bitwidth = 16
elif name == 'fp32':
self.fp_mantissa_width = 23
self.primitive_bitwidth = 32
elif name == 'fp64':
self.fp_mantissa_width = 53
self.primitive_bitwidth = 64
elif name == 'void':
self.primitive_bitwidth = 0
def is_fp8(self):
return self.name == 'fp8'
def is_fp16(self):
return self.name == 'fp16'
def is_bf16(self):
return self.name == 'bf16'
def is_fp32(self):
return self.name == 'fp32'
def is_fp64(self):
return self.name == 'fp64'
def is_int1(self):
return self.name == 'int1'
def is_int8(self):
return self.name == 'int8'
def is_int16(self):
return self.name == 'int16'
def is_int32(self):
return self.name == 'int32'
def is_int64(self):
return self.name == 'int64'
def is_uint8(self):
return self.name == 'uint8'
def is_uint16(self):
return self.name == 'uint16'
def is_uint32(self):
return self.name == 'uint32'
def is_uint64(self):
return self.name == 'uint64'
def is_floating(self):
return self.name in dtype.FP_TYPES
def is_int_signed(self):
return self.name in dtype.SINT_TYPES
def is_int_unsigned(self):
return self.name in dtype.UINT_TYPES
def is_int(self):
return self.name in dtype.SINT_TYPES + dtype.UINT_TYPES
def is_bool(self):
return self.is_int1()
def is_void(self):
return self.name == 'void'
def is_block(self):
return False
def is_ptr(self):
return False
def __eq__(self, other: dtype):
if not isinstance(other, dtype):
return False
return self.name == other.name
def __ne__(self, other: dtype):
return not self.__eq__(other)
def __hash__(self):
return hash((self.name,))
@property
def scalar(self):
return self
def to_ir(self, builder: ir.builder) -> ir.type:
if self.name == 'void':
return builder.get_void_ty()
elif self.name == 'int1':
return builder.get_int1_ty()
elif self.name == 'int8' or self.name == 'uint8':
return builder.get_int8_ty()
elif self.name == 'int16' or self.name == 'uint16':
return builder.get_int16_ty()
elif self.name == 'int32' or self.name == 'uint32':
return builder.get_int32_ty()
elif self.name == 'int64' or self.name == 'uint64':
return builder.get_int64_ty()
elif self.name == 'fp8':
return builder.get_fp8_ty()
elif self.name == 'fp16':
return builder.get_half_ty()
elif self.name == 'bf16':
return builder.get_bf16_ty()
elif self.name == 'fp32':
return builder.get_float_ty()
elif self.name == 'fp64':
return builder.get_double_ty()
raise ValueError(f'failed to convert {self} to ir type')
def __str__(self):
return self.name
@property
def cache_key_part(self) -> str:
"""See cache_key_part() in triton.cc."""
return self.name
def __repr__(self):
return f'triton.language.{self.name}'
class pointer_type(dtype):
def __init__(self, element_ty: dtype, address_space: int = 1):
if not isinstance(element_ty, dtype):
raise TypeError(f'element_ty is a {type(element_ty).__name__}.')
self.element_ty = element_ty
self.address_space = address_space
self.name = self.__str__()
def to_ir(self, builder: ir.builder) -> ir.pointer_type:
return ir.type.make_ptr(self.element_ty.to_ir(builder), 1)
def __str__(self):
return f'pointer<{self.element_ty}>'
def __repr__(self):
return self.__str__()
def is_ptr(self):
return True
def __eq__(self, other: pointer_type) -> bool:
if not isinstance(other, pointer_type):
return False
return self.element_ty == other.element_ty and self.address_space == other.address_space
def __ne__(self, other: pointer_type) -> bool:
return not self.__eq__(other)
@property
def scalar(self):
return self
class block_type(dtype):
def __init__(self, element_ty: dtype, shape: List[int]):
self.element_ty = element_ty
# FIXME:
# block_type's shape is a list of int
# while tensor's shape is a list of constexpr
self.shape = shape
self.numel = 1
for i, s in enumerate(self.shape):
if isinstance(s, constexpr):
self.shape[i] = s.value
self.numel *= self.shape[i]
self.name = self.__str__()
def to_ir(self, builder: ir.builder) -> ir.block_type:
return ir.type.make_block(self.element_ty.to_ir(builder), self.shape)
def __str__(self):
return f'<{self.shape}, {self.element_ty}>'
def __repr__(self):
return self.__str__()
def is_block(self):
return True
def get_block_shapes(self) -> List[int]:
return self.shape
def __eq__(self, other: block_type) -> bool:
if not isinstance(other, block_type):
return False
return self.element_ty == other.element_ty and self.shape == other.shape
def __ne__(self, other: block_type) -> bool:
return not self.__eq__(other)
@property
def scalar(self):
return self.element_ty
class function_type(dtype):
def __init__(self, ret_type: dtype, param_types: List[dtype]) -> None:
self.ret_type = ret_type
self.param_types = param_types
def __str__(self):
return f'fn ({self.param_types}) -> {self.ret_type}'
def to_ir(self, builder: ir.builder):
ir_param_types = [ty.to_ir(builder) for ty in self.param_types]
return ir.type.make_function(self.ret_type.to_ir(builder), ir_param_types)
class tuple_type(dtype):
def __init__(self, element_types: List[dtype]) -> None:
self.element_types = element_types
def __str__(self):
return f'<{self.element_types}>'
def to_ir(self, builder: ir.builder):
ir_element_types = [ty.to_ir(builder) for ty in self.element_types]
return ir.struct_type.get(ir_element_types, True)
# scalar types
void = dtype('void')
int1 = dtype('int1')
int8 = dtype('int8')
int16 = dtype('int16')
int32 = dtype('int32')
int64 = dtype('int64')
uint8 = dtype('uint8')
uint16 = dtype('uint16')
uint32 = dtype('uint32')
uint64 = dtype('uint64')
float8 = dtype('fp8')
float16 = dtype('fp16')
bfloat16 = dtype('bf16')
float32 = dtype('fp32')
float64 = dtype('fp64')
# pointer types
pi32_t = pointer_type(int32)
# -----------------------
# constexpr
# -----------------------
class constexpr:
"""
This class is used to store a value that is known at compile-time.
"""
def __init__(self, value):
if isinstance(value, constexpr):
self.value = value.value
else:
self.value = value
def __repr__(self) -> str:
return f"constexpr[{self.value}]"
def __bool__(self):
return bool(self.value)
def __ge__(self, other):
other = other.value if isinstance(other, constexpr) else other
return self.value >= other
def __gt__(self, other):
other = other.value if isinstance(other, constexpr) else other
return self.value > other
def __le__(self, other):
other = other.value if isinstance(other, constexpr) else other
return self.value <= other
def __lt__(self, other):
other = other.value if isinstance(other, constexpr) else other
return self.value < other
def __eq__(self, other):
other = other.value if isinstance(other, constexpr) else other
return self.value == other
def __call__(self, *args, **kwds):
return self.value(*args, **kwds)
def to(self, dtype, bitcast=False, _builder=None):
if dtype in [float8, float16, bfloat16]:
raise ValueError("floating point constexpr must be float64")
if dtype.is_int():
ret_ty = int
elif dtype.is_bool():
ret_ty = bool
elif dtype.is_floating():
ret_ty = float
return constexpr(ret_ty(self.value))
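# Added illustrative note (not part of the original file): `constexpr` is the
# type that kernel arguments annotated as `tl.constexpr` take on inside the
# compiler, e.g. (hypothetical user code):
#
#   @triton.jit
#   def kernel(ptr, BLOCK: tl.constexpr):
#       offs = tl.arange(0, BLOCK)   # BLOCK is usable where a literal is required
#
# so shapes, loop bounds and similar values can be specialized at compile time.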
class tensor:
# infer dtype from ir type
@staticmethod
def _to_dtype(ir_type):
# block type
if ir_type.is_block():
scalar_ty = tensor._to_dtype(ir_type.scalar)
return block_type(scalar_ty, ir_type.get_block_shapes())
# pointer type
if ir_type.is_ptr():
element_ty = tensor._to_dtype(ir_type.element)
return pointer_type(element_ty)
# primitive type
if ir_type.is_void(): return void
if ir_type.is_int1(): return int1
if ir_type.is_int8(): return int8
if ir_type.is_int16(): return int16
if ir_type.is_int32(): return int32
if ir_type.is_int64(): return int64
if ir_type.is_fp8(): return float8
if ir_type.is_fp16(): return float16
if ir_type.is_bf16(): return bfloat16
if ir_type.is_fp32(): return float32
if ir_type.is_fp64(): return float64
raise ValueError(f"Unsupported type {ir_type.repr()}")
def __init__(self, handle, type: dtype):
# IR handle
self.handle = handle
# Block shape
self.shape = (1, )
if self.handle.type.is_block():
self.shape = self.handle.type.shape
self.numel = 1
for s in self.shape:
self.numel *= s
is_pow2 = (self.numel and (not(self.numel & (self.numel - 1))))
if not is_pow2:
raise ValueError("Triton tensors must have a power-of-two number of elements")
self.numel = constexpr(self.numel)
self.type = type # Tensor type (can be block_type)
# Following the practice in pytorch, dtype is scalar type
self.dtype = type.scalar
self.shape = [constexpr(s) for s in self.shape]
def __str__(self) -> str:
# ex. "float32[3,4]"
return str(self.dtype) + '[' + ','.join(str(s) for s in self.shape) + ']'
@builtin
def __add__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.add(self, other, _builder)
def __radd__(self, other, _builder=None):
return self.__add__(other, _builder=_builder)
@builtin
def __sub__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.sub(self, other, _builder)
def __rsub__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.sub(other, self, _builder)
@builtin
def __mul__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.mul(self, other, _builder)
def __rmul__(self, other, _builder=None):
return self.__mul__(other, _builder=_builder)
@builtin
def __truediv__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.truediv(self, other, _builder)
def __rtruediv__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.truediv(other, self, _builder)
@builtin
def __floordiv__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.floordiv(self, other, _builder)
@builtin
def __rfloordiv__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.floordiv(other, self, _builder)
@builtin
def __mod__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.mod(self, other, _builder)
@builtin
def __rmod__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.mod(other, self, _builder)
# unary operators
@builtin
def __neg__(self, _builder=None):
return semantic.minus(self, _builder)
@builtin
def __invert__(self, _builder=None):
return semantic.invert(self, _builder)
# bitwise operators
@builtin
def __and__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.and_(self, other, _builder)
@builtin
def __or__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.or_(self, other, _builder)
@builtin
def __xor__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.xor_(self, other, _builder)
@builtin
def __lshift__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.shl(self, other, _builder)
@builtin
def __rshift__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.lshr(self, other, _builder)
# comparison operators
# >
@builtin
def __gt__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.greater_than(self, other, _builder)
@builtin
def __rgt__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.greater_than(other, self, _builder)
# >=
@builtin
def __ge__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.greater_equal(self, other, _builder)
@builtin
def __rge__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.greater_equal(other, self, _builder)
# <
@builtin
def __lt__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.less_than(self, other, _builder)
@builtin
def __rlt__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.less_than(other, self, _builder)
# <=
@builtin
def __le__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.less_equal(self, other, _builder)
@builtin
def __rle__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.less_equal(other, self, _builder)
# ==
@builtin
def __eq__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.equal(self, other, _builder)
@builtin
def __ne__(self, other, _builder=None):
other = _to_tensor(other, _builder)
return semantic.not_equal(self, other, _builder)
@builtin
def __getitem__(self, slices, _builder=None):
if isinstance(slices, slice):
slices = [slices]
src_shape = self.shape
dst_shape = []
curr = 0
for sl in slices:
if isinstance(sl, constexpr) and sl.value is None:
dst_shape.append(1)
elif sl == slice(None, None, None):
dst_shape.append(src_shape[curr].value)
curr += 1
ret = semantic.reshape(self, dst_shape, _builder)
return ret
@builtin
def to(self, dtype, bitcast=False, _builder=None):
if isinstance(bitcast, constexpr):
bitcast = bitcast.value
if bitcast:
return semantic.bitcast(self, dtype, _builder)
return semantic.cast(self, dtype, _builder)
# -----------------------
# SPMD Programming Model
# -----------------------
def _constexpr_to_value(v):
if isinstance(v, constexpr):
return v.value
return v
@builtin
def program_id(axis, _builder=None):
"""
Returns the id of the current program instance along the given :code:`axis`.
:param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2.
:type axis: int
"""
# if axis == -1:
# pid0 = program_id(0, _builder)
# pid1 = program_id(1, _builder)
# pid2 = program_id(2, _builder)
# npg0 = num_programs(0, _builder)
# npg1 = num_programs(0, _builder)
# return pid0 + pid1*npg0 + pid2*npg0*npg1
axis = _constexpr_to_value(axis)
return semantic.program_id(axis, _builder)
@builtin
def num_programs(axis, _builder=None):
"""
Returns the number of program instances launched along the given :code:`axis`.
:param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2.
:type axis: int
"""
axis = _constexpr_to_value(axis)
return semantic.num_programs(axis, _builder)
# -----------------------
# Block Initialization
# -----------------------
@builtin
def arange(start, end, _builder=None):
"""
Returns contiguous values within the half-open interval [:code:`start`, :code:`end`).
:param start: Start of the interval. Must be a power of two.
:type start: int
:param end: End of the interval. Must be a power of two >= start.
:type end: int
"""
start = _constexpr_to_value(start)
end = _constexpr_to_value(end)
return semantic.arange(start, end, _builder)
@builtin
def zeros(shape, dtype, _builder=None):
"""
Returns a tensor filled with the scalar value 0 for the given :code:`shape` and :code:`dtype`.
:param shape: Shape of the new array, e.g., (8, 16) or (8, )
:type shape: tuple of ints
:param dtype: Data-type of the new array, e.g., :code:`tl.float16`
:type dtype: DType
"""
for i, d in enumerate(shape):
if not isinstance(d, constexpr):
raise TypeError(f"Shape element {i} must have type `constexpr`")
if not isinstance(d.value, int):
raise TypeError(f"Shape element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]")
shape = [x.value for x in shape]
dtype = _constexpr_to_value(dtype)
return semantic.zeros(shape, dtype, _builder)
# -----------------------
# Shape Manipulation
# -----------------------
@builtin
def broadcast(input, other, _builder=None):
"""
Tries to broadcast the two given blocks to a common compatible shape.
:param input: The first input tensor.
:type input: Block
:param other: The second input tensor.
:type other: Block
"""
return semantic.broadcast_impl_value(input, other, _builder)
@builtin
def broadcast_to(input, shape, _builder=None):
"""
Tries to broadcast the given tensor to a new :code:`shape`.
:param input: The input tensor.
:type input: Block
:param shape: The desired shape.
:type shape: Tuple[int]
"""
return semantic.broadcast_impl_shape(input, shape, _builder)
@builtin
def cat(input, other, _builder=None):
"""
Concatenate the given blocks
:param input: The first input tensor.
:type input: Block
:param other: The second input tensor.
:type other: Block
"""
return semantic.cat(input, other, _builder)
@builtin
def reshape(input, shape, _builder=None):
"""
Tries to reshape the given tensor to a new shape.
:param input: The input tensor.
:type input: Block
:param shape: The desired shape.
:type shape: Tuple[int]
"""
shape = [x.value for x in shape]
return semantic.reshape(input, shape, _builder)
# -----------------------
# Linear Algebra
# -----------------------
@builtin
def dot(input, other, trans_a=False, trans_b=False, allow_tf32=True, _builder=None):
"""
Returns the matrix product of two blocks.
The two blocks must be two dimensionals and have compatible inner dimensions.
:param input: The first tensor to be multiplied.
:type input: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`}
:param other: The second tensor to be multiplied.
:type other: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`}
"""
allow_tf32 = _constexpr_to_value(allow_tf32)
return semantic.dot(input, other, trans_a, trans_b, allow_tf32, _builder)
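# Added illustrative sketch (not part of the original file): `dot` is typically
# used inside a K-loop that accumulates into a float32 block; the pointer names
# and strides below are hypothetical matmul-kernel variables.
#
#   acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
#   for k in range(0, K, BLOCK_K):
#       a = tl.load(a_ptrs)          # (BLOCK_M, BLOCK_K) tile
#       b = tl.load(b_ptrs)          # (BLOCK_K, BLOCK_N) tile
#       acc += tl.dot(a, b)
#       a_ptrs += BLOCK_K * stride_ak
#       b_ptrs += BLOCK_K * stride_bk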
# -----------------------
# Non-Atomic Memory Operations
# -----------------------
@builtin
def load(pointer, mask=None, other=None, cache_modifier="", eviction_policy="", volatile=False, _builder=None):
"""
Return a tensor of data whose values are, elementwise, loaded from memory at location defined by :code:`pointer`.
:code:`mask` and :code:`other` are implicitly broadcast to :code:`pointer.shape`.
:code:`other` is implicitly typecast to :code:`pointer.dtype.element_ty`.
:param pointer: Pointers to the data to be loaded.
:type pointer: Block of dtype=triton.PointerDType
:param mask: if mask[idx] is false, do not load the data at address :code:`pointer[idx]`.
:type mask: Block of triton.int1, optional
:param other: if mask[idx] is false, return other[idx]
:type other: Block, optional
:param cache_modifier: changes cache option in nvidia ptx
:type cache_modifier: str, optional
"""
# mask, other can be constexpr
if mask is not None:
mask = _to_tensor(mask, _builder)
if other is not None:
other = _to_tensor(other, _builder)
cache_modifier = _constexpr_to_value(cache_modifier)
eviction_policy = _constexpr_to_value(eviction_policy)
volatile = _constexpr_to_value(volatile)
return semantic.load(pointer, mask, other, cache_modifier, eviction_policy, volatile, _builder)
@builtin
def store(pointer, value, mask=None, eviction_policy="", _builder=None):
"""
Stores :code:`value` tensor of elements in memory, element-wise, at the memory locations specified by :code:`pointer`.
:code:`value` is implicitly broadcast to :code:`pointer.shape` and typecast to :code:`pointer.dtype.element_ty`.
:param pointer: The memory locations where the elements of :code:`value` are stored.
:type pointer: Block of dtype=triton.PointerDType
:param value: The tensor of elements to be stored.
:type value: Block
:param mask: If mask[idx] is false, do not store :code:`value[idx]` at :code:`pointer[idx]`.
:type mask: Block of triton.int1, optional
"""
# value can be constexpr
value = _to_tensor(value, _builder)
if mask is not None:
mask = _to_tensor(mask, _builder)
return semantic.store(pointer, value, mask, eviction_policy, _builder)
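# Added illustrative sketch (not part of the original file): the canonical
# masked load/store pattern as it appears in user code; the kernel name and
# arguments below are hypothetical.
#
#   @triton.jit
#   def add_kernel(x_ptr, y_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
#       pid = tl.program_id(axis=0)
#       offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
#       mask = offsets < n_elements
#       x = tl.load(x_ptr + offsets, mask=mask)
#       y = tl.load(y_ptr + offsets, mask=mask)
#       tl.store(out_ptr + offsets, x + y, mask=mask)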
# -----------------------
# Atomic Memory Operations
# -----------------------
@builtin
def atomic_cas(pointer, cmp, val, _builder=None):
"""
Performs an atomic compare-and-swap at the memory location specified by :code:`pointer`.
Return the data stored at :code:`pointer` before the atomic operation.
:param pointer: The memory locations to compare-and-swap.
:type pointer: Block of dtype=triton.PointerDType
:param cmp: The values expected to be found in the atomic object
:type cmp: Block of dtype=`pointer.dtype.element_ty`
:param val: The values to copy in case the expected value matches the contained value.
:type val: Block of dtype=`pointer.dtype.element_ty`
"""
cmp = _to_tensor(cmp, _builder)
val = _to_tensor(val, _builder)
return semantic.atomic_cas(pointer, cmp, val, _builder)
def _add_atomic_docstr(name):
def _decorator(func):
docstr = """
Performs an atomic {name} at the memory location specified by :code:`pointer`.
Return the data stored at :code:`pointer` before the atomic operation.
:param pointer: The memory locations to apply {name}.
:type pointer: Block of dtype=triton.PointerDType
:param val: The values to {name} in the atomic object.
:type val: Block of dtype=`pointer.dtype.element_ty`
:param mask: If mask[idx] is false, do not apply {name}.
:type mask: Block of triton.int1, optional
"""
func.__doc__ = docstr.format(name=name)
return func
return _decorator
@builtin
@_add_atomic_docstr("exchange")
def atomic_xchg(pointer, val, mask=None, _builder=None):
val = _to_tensor(val, _builder)
return semantic.atomic_xchg(pointer, val, mask, _builder)
@builtin
@_add_atomic_docstr("add")
def atomic_add(pointer, val, mask=None, _builder=None):
val = _to_tensor(val, _builder)
return semantic.atomic_add(pointer, val, mask, _builder)
@builtin
@_add_atomic_docstr("max")
def atomic_max(pointer, val, mask=None, _builder=None):
val = _to_tensor(val, _builder)
return semantic.atomic_max(pointer, val, mask, _builder)
@builtin
@_add_atomic_docstr("min")
def atomic_min(pointer, val, mask=None, _builder=None):
val = _to_tensor(val, _builder)
return semantic.atomic_min(pointer, val, mask, _builder)
@builtin
@_add_atomic_docstr("logical and")
def atomic_and(pointer, val, mask=None, _builder=None):
val = _to_tensor(val, _builder)
return semantic.atomic_and(pointer, val, mask, _builder)
@builtin
@_add_atomic_docstr("logical or")
def atomic_or(pointer, val, mask=None, _builder=None):
val = _to_tensor(val, _builder)
return semantic.atomic_or(pointer, val, mask, _builder)
@builtin
@_add_atomic_docstr("logical xor")
def atomic_xor(pointer, val, mask=None, _builder=None):
val = _to_tensor(val, _builder)
return semantic.atomic_xor(pointer, val, mask, _builder)
# -----------------------
# Conditioning
# -----------------------
@builtin
def where(condition, x, y, _builder=None):
"""
Returns a tensor of elements from either :code:`x` or :code:`y`, depending on :code:`condition`.
Note that :code:`x` and :code:`y` are always evaluated regardless of the value of :code:`condition`.
If you want to avoid unintended memory operations, use the :code:`mask` arguments in `triton.load` and `triton.store` instead.
The shape of :code:`x` and :code:`y` are both broadcast to the shape of :code:`condition`.
:code:`x` and :code:`y` must have the same data type.
:param condition: When True (nonzero), yield x, otherwise yield y.
:type condition: Block of triton.bool
:param x: values selected at indices where condition is True.
:param y: values selected at indices where condition is False.
"""
condition = _to_tensor(condition, _builder)
x = _to_tensor(x, _builder)
y = _to_tensor(y, _builder)
return semantic.where(condition, x, y, _builder)
# -----------------------
# Math
# -----------------------
@builtin
def umulhi(x, y, _builder=None):
x = _to_tensor(x, _builder)
y = _to_tensor(y, _builder)
return semantic.umulhi(x, y, _builder)
@builtin
def fdiv(x, y, ieee_rounding=False, _builder=None):
ieee_rounding = _constexpr_to_value(ieee_rounding)
return semantic.fdiv(x, y, ieee_rounding, _builder)
def _add_math_1arg_docstr(name):
def _decorator(func):
docstr = """
Computes the element-wise {name} of :code:`x`
:param x: the input values
:type x: Block
"""
func.__doc__ = docstr.format(name=name)
return func
return _decorator
@builtin
@_add_math_1arg_docstr("exponential")
def exp(x, _builder=None):
return semantic.exp(x, _builder)
@builtin
@_add_math_1arg_docstr("natural logarithm")
def log(x, _builder=None):
return semantic.log(x, _builder)
@builtin
@_add_math_1arg_docstr("cosine")
def cos(x, _builder=None):
return semantic.cos(x, _builder)
@builtin
@_add_math_1arg_docstr("sine")
def sin(x, _builder=None):
return semantic.sin(x, _builder)
@builtin
@_add_math_1arg_docstr("square root")
def sqrt(x, _builder=None):
return semantic.sqrt(x, _builder)
# -----------------------
# Reductions
# -----------------------
def _add_reduction_docstr(name):
def _decorator(func):
docstr = """
Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis`
:param input: the input values
:param axis: the dimension along which the reduction should be done
"""
func.__doc__ = docstr.format(name=name)
return func
return _decorator
@builtin
@_add_reduction_docstr("maximum")
def max(input, axis, _builder=None):
axis = _constexpr_to_value(axis)
return semantic.max(input, axis, _builder)
@builtin
@_add_reduction_docstr("maximum index")
def argmax(input, axis, _builder=None):
axis = _constexpr_to_value(axis)
return semantic.argmax(input, axis, _builder)
@builtin
@_add_reduction_docstr("minimum")
def min(input, axis, _builder=None):
axis = _constexpr_to_value(axis)
return semantic.min(input, axis, _builder)
@builtin
@_add_reduction_docstr("minimum index")
def argmin(input, axis, _builder=None):
axis = _constexpr_to_value(axis)
return semantic.argmin(input, axis, _builder)
@builtin
@_add_reduction_docstr("sum")
def sum(input, axis, _builder=None):
axis = _constexpr_to_value(axis)
return semantic.sum(input, axis, _builder)
@builtin
@_add_reduction_docstr("xor sum")
def xor_sum(input, axis, _builder=None):
axis = _constexpr_to_value(axis)
return semantic.xor_sum(input, axis, _builder)
# -----------------------
# Utilities
# -----------------------
@builtin
def globaltimer(_builder=None):
return semantic.globaltimer(_builder)
@builtin
def clock(_builder=None):
return semantic.clock(_builder)
# -----------------------
# Internal for debugging
# -----------------------
@builtin
def debug_barrier(_builder=None):
return semantic.debug_barrier(_builder)
@builtin
def multiple_of(input, values, _builder=None):
"""
Let the compiler know that the values in :code:`input` are all multiples of :code:`values`.
"""
if isinstance(values, constexpr):
values = [values]
for i, d in enumerate(values):
if not isinstance(d, constexpr):
raise TypeError(f"values element {i} must have type `constexpr`")
if not isinstance(d.value, int):
raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]")
values = [x.value for x in values]
return semantic.multiple_of(input, values)
@builtin
def max_contiguous(input, values, _builder=None):
"""
Let the compiler know that the first :code:`values` values in :code:`input` are contiguous.
"""
if isinstance(values, constexpr):
values = [values]
for i, d in enumerate(values):
if not isinstance(d, constexpr):
raise TypeError(f"values element {i} must have type `constexpr`")
if not isinstance(d.value, int):
raise TypeError(f"values element {i} must have type `constexpr[int]`, got `constexpr[{type(d.value)}]")
values = [x.value for x in values]
return semantic.max_contiguous(input, values)
# -----------------------
# Standard library
# -----------------------
@triton.jit
def abs(x):
return where(x >= 0, x, -x)
@triton.jit
def cdiv(x, div):
"""
Computes the ceiling division of :code:`x` by :code:`div`
:param x: the input number
:type x: Block
:param div: the divisor
:type div: Block
"""
return (x + div - 1) // div
@triton.jit
def minimum(x, y):
"""
Computes the element-wise minimum of :code:`x` and :code:`y`.
:param x: the first input tensor
:type x: Block
:param y: the second input tensor
:type y: Block
"""
return triton.language.where(x < y, x, y)
@triton.jit
def maximum(x, y):
"""
Computes the element-wise maximum of :code:`x` and :code:`y`.
:param x: the first input tensor
:type x: Block
:param y: the second input tensor
:type y: Block
"""
return triton.language.where(x > y, x, y)
@triton.jit
@_add_math_1arg_docstr("sigmoid")
def sigmoid(x):
return 1 / (1 + triton.language.exp(-x))
@triton.jit
@_add_math_1arg_docstr("softmax")
def softmax(x, ieee_rounding: constexpr = False):
z = x - triton.language.max(x, 0)
num = triton.language.exp(z)
den = triton.language.sum(num, 0)
return fdiv(num, den, ieee_rounding)
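# (added note) subtracting the row maximum before exponentiating keeps `exp`
# in a numerically safe range; the shift cancels between numerator and
# denominator, so the result is unchanged.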
@triton.jit
def ravel(x):
"""
Returns a contiguous flattened view of :code:`x`
:param x: the input tensor
:type x: Block
"""
return triton.language.reshape(x, [x.numel])
@triton.jit
def swizzle2d(i, j, size_i, size_j, size_g):
"""
    Transforms indices of a row-major size_i*size_j matrix into those
of one where indices are row major for each group of size_j rows.
For example, for size_i = size_j = 4 and size_g = 2, it will transform
[[0 , 1 , 2 , 3 ],
[4 , 5 , 6 , 7 ],
[8 , 9 , 10, 11],
[12, 13, 14, 15]]
into
[[0, 2, 4 , 6 ],
[1, 3, 5 , 7 ],
[8, 10, 12, 14],
[9, 11, 13, 15]]
"""
# "unrolled index in array"
ij = i * size_j + j
# number of elements in `size_g` groups
# of `size_j` columns
size_gj = size_g * size_j
# index of the group in which (i,j) is
group_id = ij // size_gj
# row-index of the first element of this group
off_i = group_id * size_g
# last group may have fewer rows
size_g = minimum(size_i - off_i, size_g)
# new row and column indices
new_i = off_i + (ij % size_g)
new_j = (ij % size_gj) // size_g
return new_i, new_j
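# Illustrative host-side sketch (an assumption for exposition, not part of the
# public API): the same index math as `swizzle2d` above, written in plain Python
# so the 4x4 / size_g=2 example in the docstring can be checked without a GPU.
# Note: it avoids `min`, which is shadowed by the reduction builtin in this module.
def _swizzle2d_reference(i, j, size_i, size_j, size_g):
    ij = i * size_j + j                        # unrolled index in the array
    size_gj = size_g * size_j                  # elements per group of rows
    group_id = ij // size_gj                   # group that (i, j) belongs to
    off_i = group_id * size_g                  # first row of that group
    rows_left = size_i - off_i                 # last group may have fewer rows
    size_g = rows_left if rows_left < size_g else size_g
    return off_i + (ij % size_g), (ij % size_gj) // size_g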
@triton.jit
def zeros_like(input):
return zeros(input.shape, input.dtype)
# -----------------------
# Dynamic Parallelism
# -----------------------
# class LaunchProxy:
# def __init__(self, fn, args, constants, grid, num_warps) -> None:
# self.args = args
# self.grid = grid
# self.constants = constants
# self.num_warps = num_warps
# self.fn = fn
# @builtin
# def launch(fn, args, grid, num_warps=None, _builder=None):
# constants = {i: x for i, x in enumerate(args) if isinstance(x, constexpr)}
# args = [_to_ir(x, builder=_builder) for x in args if not isinstance(x, constexpr)]
# grid = [_to_ir(x, builder=_builder) for x in grid]
# if num_warps is None:
# num_warps = _to_ir(4, builder=_builder)
# return LaunchProxy(fn, args, constants, grid, num_warps)
| triton-master | python/triton/language/core.py |
import triton
from . import core as tl
PHILOX_KEY_A: tl.constexpr = -1640531527 # 0x9E3779B9
PHILOX_KEY_B: tl.constexpr = -1150833019 # 0xBB67AE85
PHILOX_ROUND_A: tl.constexpr = -766435501 # 0xD2511F53
PHILOX_ROUND_B: tl.constexpr = -845247145 # 0xCD9E8D57
N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox
# -------------------
# randint
# -------------------
@triton.jit
def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1).
"""
for _ in range(n_rounds):
# update random state
A = PHILOX_ROUND_A
B = PHILOX_ROUND_B
_c0, _c2 = c0, c2
c0 = tl.umulhi(B, _c2) ^ c1 ^ k0
c2 = tl.umulhi(A, _c0) ^ c3 ^ k1
c1 = B * _c2
c3 = A * _c0
# raise key
k0 = k0 + PHILOX_KEY_A
k1 = k1 + PHILOX_KEY_B
return c0, c1, c2, c3
@triton.jit
def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
seed = seed.to(tl.uint64)
seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32)
seed_lo = (seed & 0xffffffff).to(tl.uint32)
return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds)
@triton.jit
def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns a single
block of random :code:`int32`.
If you need multiple streams of random numbers,
using `randint4x` is likely to be faster than calling `randint` 4 times.
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
ret, _, _, _ = randint4x(seed, offset, n_rounds)
return ret
@triton.jit
def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns four
blocks of random :code:`int32`.
This is the maximally efficient entry point
to Triton's Philox pseudo-random number generator.
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
# _0 = tl.zeros(offset.shape, offset.dtype)
_0 = offset * 0
return philox(seed, offset, _0, _0, _0, n_rounds)
# -------------------
# rand
# -------------------
# @triton.jit
# def uint32_to_uniform_float(x):
# """
# Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
# """
# two_to_the_minus_32: tl.constexpr = 2.328306e-10
# return x * two_to_the_minus_32
@triton.jit
def uint32_to_uniform_float(x):
"""
Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
"""
x = x.to(tl.int32, bitcast=True)
# maximum value such that `MAX_INT * scale < 1.0` (with float rounding)
scale = 4.6566127342e-10
x = tl.where(x < 0, -x - 1, x)
return x * scale
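# For example (a sketch of the folding above): bit patterns 0x00000000 and
# 0xFFFFFFFF both map to 0.0, while 0x7FFFFFFF and 0x80000000 both map to just
# under 1.0 -- every uint32 lands in [0, 1) without overflow.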
@triton.jit
def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
offset = offset.to(tl.uint32, bitcast=True)
source = randint(seed, offset, n_rounds)
return uint32_to_uniform_float(source)
@triton.jit
def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offsets` block,
    returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
offsets = offsets.to(tl.uint32, bitcast=True)
i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
u3 = uint32_to_uniform_float(i3)
u4 = uint32_to_uniform_float(i4)
return u1, u2, u3, u4
# -------------------
# randn
# -------------------
@triton.jit
def pair_uniform_to_normal(u1, u2):
"""Box-Muller transform"""
u1 = tl.maximum(1.0e-7, u1)
th = 6.283185307179586 * u2
r = tl.sqrt(-2.0 * tl.log(u1))
return r * tl.cos(th), r * tl.sin(th)
@triton.jit
def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
i1, i2, _, _ = randint4x(seed, offset, n_rounds)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
n1, _ = pair_uniform_to_normal(u1, u2)
return n1
@triton.jit
def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
    returns 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`
    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
u1, u2, u3, u4 = rand4x(seed, offset, n_rounds)
n1, n2 = pair_uniform_to_normal(u1, u2)
n3, n4 = pair_uniform_to_normal(u3, u4)
return n1, n2, n3, n4
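# -------------------
# Illustrative usage sketch (an assumption for exposition, not part of the
# library): a typical consumer of `rand` is a seeded dropout kernel.  The
# kernel name and signature below are hypothetical.
# -------------------
@triton.jit
def _example_seeded_dropout(x_ptr, y_ptr, n, p, seed, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask)
    r = rand(seed, offs)                 # uniform float32 in [0, 1), one per offset
    keep = r > p
    y = tl.where(keep, x / (1 - p), 0.0)
    tl.store(y_ptr + offs, y, mask=mask)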
| triton-master | python/triton/language/random.py |
from __future__ import annotations # remove after python 3.11
from . import core, semantic
def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple, _builder=None):
'''
Dispatch a function to a library
:param func: the function to dispatch
:param lib_name: the name of the library
:param lib_path: the path of the library
:param args: the arguments of the function
    :param arg_type_symbol_dict: mapping from tuples of argument types to (symbol, return type)
:param ret_shape: the shape of the return value
:param _builder: the builder
:return: the return value of the function
'''
if len(arg_type_symbol_dict) == 0:
raise ValueError("arg_type_symbol_dict is empty")
num_args = len(list(arg_type_symbol_dict.keys())[0])
if len(args) != num_args:
        raise ValueError(f"length of input args does not match. "
                         f"Expect {num_args}, got {len(args)}")
arg_types = []
arg_list = []
for arg in args:
if isinstance(arg, core.tensor):
arg_types.append(arg.dtype)
arg_list.append(arg.handle)
else:
arg_types.append(type(arg))
arg_list.append(arg)
arg_types = tuple(arg_types)
if arg_types not in arg_type_symbol_dict:
        raise ValueError(f"input arg type does not match. "
                         f"Expect one of {arg_type_symbol_dict.keys()}, got {arg_types}")
else:
symbol = arg_type_symbol_dict[arg_types][0]
ret_type = arg_type_symbol_dict[arg_types][1]
ret_type = core.block_type(ret_type, ret_shape) if ret_shape is not None else ret_type
return core.tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder)), ret_type)
def elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, _builder=None):
'''
Dispatch an elementwise function to a library
:param lib_name: the name of the library
:param lib_path: the path of the library
:param args: the arguments of the function
    :param arg_type_symbol_dict: mapping from tuples of argument types to (symbol, return type)
:param _builder: the builder
:return: the return value of the function
'''
dispatch_args = args.copy()
if len(args) == 1:
dispatch_args[0] = core._to_tensor(dispatch_args[0], _builder)
ret_shape = dispatch_args[0].shape
elif len(args) == 2:
dispatch_args[0] = core._to_tensor(dispatch_args[0], _builder)
dispatch_args[1] = core._to_tensor(dispatch_args[1], _builder)
dispatch_args[0], dispatch_args[1] = semantic.binary_op_type_checking_impl(
dispatch_args[0], dispatch_args[1], _builder)
ret_shape = dispatch_args[0].shape
else:
for i in range(len(dispatch_args)):
dispatch_args[i] = core._to_tensor(dispatch_args[i], _builder)
broadcast_arg = dispatch_args[0]
# Get the broadcast shape over all the arguments
for i in range(len(dispatch_args)):
_, broadcast_arg = semantic.binary_op_type_checking_impl(
dispatch_args[i], broadcast_arg, _builder)
# Change the shape of each argument based on the broadcast shape
for i in range(len(dispatch_args)):
dispatch_args[i], _ = semantic.binary_op_type_checking_impl(
dispatch_args[i], broadcast_arg, _builder)
ret_shape = broadcast_arg.shape
func = getattr(_builder, "create_extern_elementwise")
return dispatch(func, lib_name, lib_path, dispatch_args, arg_type_symbol_dict, ret_shape, _builder)
class ExternalFunction:
'''
A wrapper for external functions
'''
def __init__(self, fn):
self.fn = fn
def __call__(self, *args, **kwargs):
if '_builder' not in kwargs or \
kwargs['_builder'] is None:
raise ValueError("Did you forget to add @triton.jit ? (`_builder` argument must be provided outside of JIT functions.)")
return self.fn(*args, **kwargs)
def extern(fn):
'''
A decorator for external functions
'''
return ExternalFunction(fn)
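# Illustrative sketch (an assumption for exposition): a binding built on top of
# the helpers above.  The library path and symbol names below are placeholders,
# not real entries shipped with Triton.
@extern
def _example_unary_fn(arg0, _builder=None):
    return elementwise("libdevice", "/path/to/libdevice.10.bc", [arg0],
                       {(core.dtype("fp32"),): ("__nv_example_f32", core.dtype("fp32")),
                        (core.dtype("fp64"),): ("__nv_example_f64", core.dtype("fp64"))},
                       _builder)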
| triton-master | python/triton/language/extern.py |
import torch
import triton
import triton.language as tl
def next_power_of_2(n):
n -= 1
n |= n >> 1
n |= n >> 2
n |= n >> 4
n |= n >> 8
n |= n >> 16
n += 1
return n
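# e.g. next_power_of_2(1) == 1, next_power_of_2(5) == 8, next_power_of_2(1024) == 1024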
def num_warps(N):
if N < 2048:
return 4
elif N < 8192:
return 8
return 16
@triton.heuristics({'num_warps': lambda nargs: num_warps(nargs['N'])})
@triton.heuristics({'BLOCK': lambda nargs: next_power_of_2(nargs['N'])})
@triton.jit
def _forward(LOGITS, PROBS, IDX, LOSS, N, BLOCK: tl.constexpr):
row = tl.program_id(0)
cols = tl.arange(0, BLOCK)
idx = tl.load(IDX + row)
# pointers to logit and probs
LOGITS = LOGITS + row * N + cols
WRIT_PROBS = PROBS + row * N + cols
READ_PROBS = PROBS + row * N + idx
# write-back negative log-probs
logits = tl.load(LOGITS, mask=cols < N, other=-float('inf'))
logits = logits.to(tl.float32)
logits = logits - tl.max(logits, 0)
probs = tl.log(tl.sum(tl.exp(logits), 0)) - logits
tl.store(WRIT_PROBS, probs, mask=cols < N)
# There is a bug in the compiler, which fails to insert a barrier here.
# We add it explicitly for now. Will be fixed soon.
tl.debug_barrier()
# write-back loss
probs = tl.load(READ_PROBS)
tl.store(LOSS + row, probs)
@triton.heuristics({'num_warps': lambda nargs: num_warps(nargs['N'])})
@triton.heuristics({'BLOCK': lambda nargs: next_power_of_2(nargs['N'])})
@triton.jit
def _backward(PROBS, IDX, DPROBS, N, BLOCK: tl.constexpr):
row = tl.program_id(0)
cols = tl.arange(0, BLOCK)
idx = tl.load(IDX + row)
# pointers to probs
PROBS = PROBS + row * N + cols
    # We know d(-log(p[i]))/dlogit[k] = -id_mat[i,k] + p[k]
# and we have -log(p[k]) stored in PROBS, so this is easy
probs = -tl.load(PROBS, mask=cols < N, other=float('inf'))
probs = tl.exp(probs.to(tl.float32))
delta = cols == idx
# write result in-place in PROBS
dout = tl.load(DPROBS + row)
din = (probs - delta) * dout
tl.store(PROBS, din.to(PROBS.dtype.element_ty), mask=cols < N)
class _cross_entropy(torch.autograd.Function):
@classmethod
def forward(cls, ctx, logits, indices):
# make sure we can use triton
assert (indices.dtype == torch.int64), "Indices are expected to be of type long."
# make kernel
device, dtype = logits.device, logits.dtype
n_cols = logits.shape[-1]
# run the kernel
result = torch.empty_like(indices, dtype=dtype, device=device)
neg_logprobs = torch.empty_like(logits, dtype=dtype, device=device)
grid = lambda opt: (logits.numel() // n_cols, )
_forward[grid](logits, neg_logprobs, indices, result, n_cols)
# save for backward
ctx.save_for_backward(neg_logprobs, indices)
return result
@classmethod
def backward(cls, ctx, dneg_logprobs):
"""We know d(-log(p[i])/dlogit[k] = -id_mat[i,k] + p[k]
so we initialize the gradient as neg_logprobs, so we can just exponentiate
to get p[k], which is most of what we need... neg_logprobs will be
modified in place to become the gradient we want
"""
# load saved tensors
neg_logprobs, indices = ctx.saved_tensors
# run the kernel
# neg_logprobs will be modified in place to become our gradient:
n_cols = neg_logprobs.shape[-1]
grid = lambda opt: (neg_logprobs.numel() // n_cols, )
_backward[grid](neg_logprobs, indices, dneg_logprobs, n_cols)
return neg_logprobs, None
cross_entropy = _cross_entropy.apply
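if __name__ == "__main__":
    # Illustrative usage sketch (an assumption for exposition, not a shipped
    # test): the fused op takes raw logits and int64 class indices, one row per
    # sample, and returns the per-row negative log-likelihood.
    logits = torch.randn(128, 1000, device="cuda", dtype=torch.float16, requires_grad=True)
    targets = torch.randint(0, 1000, (128,), device="cuda")
    loss = cross_entropy(logits, targets)    # shape (128,)
    loss.sum().backward()                    # gradients computed by _backward
    print(loss.mean().item())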
| triton-master | python/triton/ops/cross_entropy.py |
import torch
import triton
import triton.language as tl
from .matmul_perf_model import early_config_prune, estimate_matmul_time
def init_to_zero(name):
return lambda nargs: nargs[name].zero_()
def get_configs_io_bound():
configs = []
for num_stages in [2, 3, 4, 5, 6]:
for block_m in [16, 32]:
for block_k in [32, 64]:
for block_n in [32, 64, 128, 256]:
num_warps = 2 if block_n <= 64 else 4
configs.append(
triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': 1},
num_stages=num_stages, num_warps=num_warps))
# split_k
for split_k in [2, 4, 8, 16]:
configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
return configs
@triton.heuristics({
'EVEN_K': lambda args: args['K'] % (args['BLOCK_K'] * args['SPLIT_K']) == 0,
})
@triton.autotune(
configs=[
# basic configs for compute-bound matmuls
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 32, 'SPLIT_K': 1}, num_stages=5, num_warps=2),
# good for int8
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_M': 256, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_M': 256, 'BLOCK_N': 64, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 64, 'BLOCK_N': 256, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 128, 'BLOCK_K': 128, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 64, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 64, 'BLOCK_N': 128, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 128, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_M': 64, 'BLOCK_N': 32, 'BLOCK_K': 64, 'SPLIT_K': 1}, num_stages=5, num_warps=2),
] + get_configs_io_bound(),
key=['M', 'N', 'K'],
prune_configs_by={
'early_config_prune': early_config_prune,
'perf_model': estimate_matmul_time,
'top_k': 10
},
)
@triton.jit
def _kernel(A, B, C, M, N, K,
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
GROUP_M: tl.constexpr, SPLIT_K: tl.constexpr, EVEN_K: tl.constexpr,
ACC_TYPE: tl.constexpr
):
# matrix multiplication
pid = tl.program_id(0)
pid_z = tl.program_id(1)
grid_m = (M + BLOCK_M - 1) // BLOCK_M
grid_n = (N + BLOCK_N - 1) // BLOCK_N
# re-order program ID for better L2 performance
width = GROUP_M * grid_n
group_id = pid // width
group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
pid_m = group_id * GROUP_M + (pid % group_size)
pid_n = (pid % width) // (group_size)
# do matrix multiplication
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
rk = pid_z * BLOCK_K + tl.arange(0, BLOCK_K)
# pointers
A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=ACC_TYPE)
for k in range(K, 0, -BLOCK_K * SPLIT_K):
if EVEN_K:
a = tl.load(A)
b = tl.load(B)
else:
a = tl.load(A, mask=rk[None, :] < k, other=0.)
b = tl.load(B, mask=rk[:, None] < k, other=0.)
acc += tl.dot(a, b)
A += BLOCK_K * SPLIT_K * stride_ak
B += BLOCK_K * SPLIT_K * stride_bk
acc = acc.to(C.dtype.element_ty)
# rematerialize rm and rn to save registers
rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
C = C + (rm[:, None] * stride_cm + rn[None, :] * stride_cn)
mask = (rm < M)[:, None] & (rn < N)[None, :]
# handles write-back with reduction-splitting
if SPLIT_K == 1:
tl.store(C, acc, mask=mask)
else:
tl.atomic_add(C, acc, mask=mask)
class _matmul(torch.autograd.Function):
kernel = _kernel
_locks = dict()
@staticmethod
def _call(a, b):
device = a.device
# handle non-contiguous inputs if necessary
if a.stride(0) > 1 and a.stride(1) > 1:
a = a.contiguous()
if b.stride(0) > 1 and b.stride(1) > 1:
b = b.contiguous()
# checks constraints
assert a.shape[1] == b.shape[0], "incompatible dimensions"
M, K = a.shape
_, N = b.shape
# allocates output
c = torch.empty((M, N), device=device, dtype=a.dtype)
# accumulator types
ACC_TYPE = tl.float32 if a.dtype in [torch.float16, torch.bfloat16, torch.float32] else tl.int32
# launch kernel
grid = lambda META: (triton.cdiv(M, META['BLOCK_M']) * triton.cdiv(N, META['BLOCK_N']), META['SPLIT_K'])
_kernel[grid](a, b, c, M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
GROUP_M=8, ACC_TYPE=ACC_TYPE)
return c
@staticmethod
def forward(ctx, a, b):
return _matmul._call(a, b)
matmul = _matmul.apply
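if __name__ == "__main__":
    # Illustrative usage sketch (an assumption for exposition): a drop-in dense
    # GEMM.  The first call for a given (M, N, K) triggers autotuning over the
    # configs above; subsequent calls reuse the cached choice.
    a = torch.randn(512, 512, device="cuda", dtype=torch.float16)
    b = torch.randn(512, 512, device="cuda", dtype=torch.float16)
    c = matmul(a, b)
    print((c - torch.matmul(a, b)).abs().max().item())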
| triton-master | python/triton/ops/matmul.py |
# flake8: noqa: F401
#from .conv import _conv, conv
from . import blocksparse
from .cross_entropy import _cross_entropy, cross_entropy
from .matmul import _matmul, matmul
| triton-master | python/triton/ops/__init__.py |
import heapq
import torch
import triton
import triton._C.libtriton.triton as _triton
from triton.testing import get_dram_gbps, get_max_simd_tflops, get_max_tensorcore_tflops
def get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype):
''' return compute throughput in TOPS '''
total_warps = num_ctas * min(num_warps, 4)
num_subcores = _triton.runtime.num_sm(backend, device) * 4 # on recent GPUs
tflops = min(num_subcores, total_warps) / num_subcores * get_max_tensorcore_tflops(dtype, backend, device)
return tflops
def get_simd_tflops(backend, device, num_ctas, num_warps, dtype):
''' return compute throughput in TOPS '''
total_warps = num_ctas * min(num_warps, 4)
num_subcores = _triton.runtime.num_sm(backend, device) * 4 # on recent GPUs
tflops = min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, backend, device)
return tflops
def get_tflops(backend, device, num_ctas, num_warps, dtype):
cc = _triton.runtime.cc(backend, device)
if cc < 80 and dtype == torch.float32:
return get_simd_tflops(backend, device, num_ctas, num_warps, dtype)
return get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype)
def estimate_matmul_time(
# backend, device,
num_warps, num_stages,
A, B, C,
M, N, K,
BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K,
debug=False, **kwargs
):
''' return estimated running time in ms
= max(compute, loading) + store '''
backend = _triton.runtime.backend.CUDA
device = torch.cuda.current_device()
dtype = A.dtype
dtsize = A.element_size()
num_cta_m = triton.cdiv(M, BLOCK_M)
num_cta_n = triton.cdiv(N, BLOCK_N)
num_cta_k = SPLIT_K
num_ctas = num_cta_m * num_cta_n * num_cta_k
# If the input is smaller than the block size
M, N = max(M, BLOCK_M), max(N, BLOCK_N)
# time to compute
total_ops = 2 * M * N * K / (1024 * 1024 * 1024) # GOPS
tput = get_tflops(backend, device, num_ctas, num_warps, dtype)
compute_ms = total_ops / tput
# time to load data
num_sm = _triton.runtime.num_sm(backend, device)
active_cta_ratio = min(1, num_ctas / num_sm)
active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate
active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5%
dram_bw = get_dram_gbps(backend, device) * (active_cta_ratio_bw1 * 0.95 + active_cta_ratio_bw2 * 0.05) # in GB/s
l2_bw = dram_bw * 4 # rough estimation (should be 4.7 for A100?)
# assume 80% of (following) loads are in L2 cache
load_a_dram = M * K * dtsize * (1 + 0.2 * (num_cta_n - 1))
load_a_l2 = M * K * dtsize * 0.8 * (num_cta_n - 1)
load_b_dram = N * K * dtsize * (1 + 0.2 * (num_cta_m - 1))
load_b_l2 = N * K * dtsize * 0.8 * (num_cta_m - 1)
# total
total_dram = (load_a_dram + load_b_dram) / (1024 * 1024) # MB
total_l2 = (load_a_l2 + load_b_l2) / (1024 * 1024)
# loading time in ms
load_ms = total_dram / dram_bw + total_l2 / l2_bw
# estimate storing time
store_bw = dram_bw * 0.6 # :o
store_c_dram = M * N * dtsize * SPLIT_K / (1024 * 1024) # MB
if SPLIT_K == 1:
store_ms = store_c_dram / store_bw
else:
reduce_bw = store_bw
store_ms = store_c_dram / reduce_bw
# c.zero_()
zero_ms = M * N * 2 / (1024 * 1024) / store_bw
store_ms += zero_ms
total_time_ms = max(compute_ms, load_ms) + store_ms
if debug:
print(f'Total time: {total_time_ms}ms, compute time: {compute_ms}ms, '
f'loading time: {load_ms}ms, store time: {store_ms}ms, '
f'Activate CTAs: {active_cta_ratio*100}%')
return total_time_ms
def early_config_prune(configs, named_args):
backend = _triton.runtime.backend.CUDA
device = torch.cuda.current_device()
cc = _triton.runtime.cc(backend, device)
# BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages
dtsize = named_args['A'].element_size()
dtype = named_args['A'].dtype
# 1. make sure we have enough smem
pruned_configs = []
for config in configs:
kw = config.kwargs
BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \
kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], config.num_stages
max_shared_memory = _triton.runtime.max_shared_memory(backend, device)
required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize
if required_shared_memory <= max_shared_memory:
pruned_configs.append(config)
configs = pruned_configs
# Some dtypes do not allow atomic_add
if dtype not in [torch.float16, torch.float32]:
configs = [config for config in configs if config.kwargs['SPLIT_K'] == 1]
# group configs by (BLOCK_M,_N,_K, SPLIT_K, num_warps)
configs_map = {}
for config in configs:
kw = config.kwargs
BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages = \
kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], kw['SPLIT_K'], config.num_warps, config.num_stages
key = (BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps)
if key in configs_map:
configs_map[key].append((config, num_stages))
else:
configs_map[key] = [(config, num_stages)]
pruned_configs = []
for k, v in configs_map.items():
BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k
if cc >= 80:
# compute cycles (only works for ampere GPUs)
mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16)
mma_cycles = mmas / min(4, num_warps) * 8
ldgsts_latency = 300 # Does this matter?
optimal_num_stages = ldgsts_latency / mma_cycles
# nearest stages, prefer large #stages
nearest = heapq.nsmallest(2, v, key=lambda x: 10 + abs(x[1] - optimal_num_stages)
if (x[1] - optimal_num_stages) < 0 else x[1] - optimal_num_stages)
for n in nearest:
pruned_configs.append(n[0])
else: # Volta & Turing only supports num_stages <= 2
random_config = v[0][0]
random_config.num_stages = 2
pruned_configs.append(random_config)
return pruned_configs
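# Illustrative wiring sketch (for exposition; ops/matmul.py in this repo does
# exactly this): the two hooks above are consumed by the autotuner through
# `prune_configs_by`, so only the most promising configs are actually benchmarked.
#
#   @triton.autotune(
#       configs=[...],
#       key=['M', 'N', 'K'],
#       prune_configs_by={'early_config_prune': early_config_prune,
#                         'perf_model': estimate_matmul_time,
#                         'top_k': 10},
#   )
#   @triton.jit
#   def _kernel(...):
#       ...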
| triton-master | python/triton/ops/matmul_perf_model.py |
import torch
import triton
import triton.language as tl
# ********************************************************
# --------------------------------------------------------
# Sparse = Dense x Dense (SDD)
# This operation uses super-blocking to make sure that
# it's done efficiently when small blocks can be grouped
# together
# --------------------------------------------------------
# ********************************************************
@triton.heuristics({
'EVEN_K': lambda nargs: nargs['K'] % nargs['TILE_K'] == 0,
})
@triton.jit
def _sdd_kernel(
A, B, C,
stride_za, stride_ha, stride_ma, stride_ak,
stride_zb, stride_hb, stride_bk, stride_nb,
stride_zc, stride_hc, stride_mc, stride_nc,
K, grid_offset, lut,
TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr,
BLOCK: tl.constexpr, EVEN_K: tl.constexpr
):
# ------------ #
# - Prologue - #
# ------------ #
block_id = tl.program_id(1) + grid_offset
lut += block_id * 3
# offsets
off_z = tl.program_id(2) # batch
off_h = tl.load(lut + 0) # head
# initialize pointers to A
start_am = tl.load(lut + 1)
offs_am = start_am * BLOCK + (tl.arange(0, TILE_M) % BLOCK)
offs_ak = tl.arange(0, TILE_K)
a_ptrs = A \
+ off_z * stride_za \
+ off_h * stride_ha \
+ offs_am[:, None] * stride_ma \
+ offs_ak[None, :] * stride_ak
# initialize pointers to B
start_bn = tl.load(lut + 2)
offs_bn = start_bn * BLOCK + (tl.arange(0, TILE_N) % BLOCK)
offs_bk = tl.arange(0, TILE_K)
b_ptrs = B \
+ off_z * stride_zb \
+ off_h * stride_hb \
+ offs_bn[None, :] * stride_nb \
+ offs_bk[:, None] * stride_bk
# ---------------- #
# Inner Loop #
# ---------------- #
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
for k in range(K, 0, -TILE_K):
if EVEN_K:
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
else:
a = tl.load(a_ptrs, mask=offs_ak[None, :] < k, other=0.)
b = tl.load(b_ptrs, mask=offs_bk[:, None] < k, other=0.)
acc += tl.dot(a, b)
a_ptrs += TILE_K * stride_ak
b_ptrs += TILE_K * stride_bk
c = acc.to(C.dtype.element_ty)
# ---------------- #
# Epilogue #
# ---------------- #
offs_cm = tl.arange(0, TILE_M) % BLOCK
offs_cn = tl.arange(0, TILE_N) % BLOCK
pc = C \
+ off_z * stride_zc \
+ block_id * stride_hc \
+ offs_cm[:, None] * stride_mc \
+ offs_cn[None, :] * stride_nc
tl.store(pc, c, mask=True)
def sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, widths, out=None):
if a.stride(2) != 1 and a.stride(3) != 1:
a = a.contiguous()
if b.stride(2) != 1 and b.stride(3) != 1:
b = b.contiguous()
# (A * B)^T = B^T * A^T
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
# shape constraints
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
Ka, Kb = a.shape[a_dim], b.shape[b_dim]
if Ka != Kb:
raise ValueError(f"Inner dimension mismatch (A: {Ka} vs B: {Kb})")
# allocate output
if out is None:
c = torch.empty((a.shape[0], lut.shape[0], block, block), dtype=a.dtype, device=a.device)
else:
assert out.shape == (a.shape[0], lut.shape[0], block, block)
c = out
grid = [1, c.shape[1], c.shape[0]]
_sdd_kernel[grid](
a, b, c,
a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
c.stride(0), c.stride(1), c.stride(2), c.stride(3),
Ka, 0, lut,
TILE_M=block, TILE_N=block, TILE_K=32, BLOCK=block, num_stages=4,
num_warps=4,
)
return c
def sdd_lut(layout, block, device):
lut = layout.nonzero(as_tuple=False).to(device).int()
lut = lut.contiguous()
return lut, None
# -----------------------------
# Dense = Sparse x Dense (DSD)
# This operation uses a look-up table that contains pre-computed pointer increments
# in order to minimize computations in the inner loop of the matmul kernel.
# -----------------------------
@triton.jit
def _dsd_kernel(
A, B, C,
stride_az, stride_ha, stride_am, stride_ak,
stride_zb, stride_hb, stride_bk, stride_bn,
stride_zc, stride_hc, stride_cm, stride_cn,
DS0, DS1, lut,
TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr, BLOCK: tl.constexpr
):
# ------------ #
# - Prologue - #
# ------------ #
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
num_pid_m = tl.num_programs(0)
num_pid_n = tl.num_programs(1)
pid_n, pid_m = tl.swizzle2d(pid_n, pid_m, num_pid_n, num_pid_m, GROUP_SIZE_M)
pidz = tl.program_id(2)
header = lut + pid_n * 4
offset = tl.load(header + 0)
K = tl.load(header + 1)
column = tl.load(header + 2)
off_h = tl.load(header + 3)
pinc = lut + offset
# initialize pointers to A (sparse)
block_id = tl.load(pinc + 1)
block_id = tl.multiple_of(block_id, 8) # compiler hint
offs_am = tl.arange(0, TILE_M)
offs_ak = tl.arange(0, TILE_K)
pa = A + pidz * stride_az \
+ block_id * stride_ha \
+ offs_am[:, None] * stride_am \
+ offs_ak[None, :] * stride_ak
# initialize pointers to B (dense)
offs_bn = pid_m * TILE_N + tl.arange(0, TILE_N)
offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn % DS0, TILE_N), TILE_N)
start_bk = tl.load(pinc)
start_bk = tl.multiple_of(start_bk, 8) # compiler hint
offs_bk = start_bk + tl.arange(0, TILE_K)
pb = B + pidz * stride_zb \
+ off_h * stride_hb \
+ offs_bn[None, :] * stride_bn \
+ offs_bk[:, None] * stride_bk
# ---------------- #
# Inner Loop #
# ---------------- #
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
pinc += 2
inc_a = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.load(pinc)
inc_b = tl.multiple_of(inc_b, 8)
for k in range(K, 0, -TILE_K):
a = tl.load(pa, mask=True)
b = tl.load(pb, mask=offs_bn[None, :] < DS0)
acc += tl.dot(a, b)
pa += inc_a
pb += inc_b * stride_bk
pinc += 2
inc_a = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.load(pinc)
inc_b = tl.multiple_of(inc_b, 8)
c = acc.to(C.dtype.element_ty)
# initialize pointers to C
offs_cm = column * TILE_M + tl.arange(0, TILE_M)
offs_cn = pid_m * TILE_N + tl.arange(0, TILE_N)
pc = C \
+ off_h * stride_hc \
+ pidz * stride_zc \
+ offs_cm[:, None] * stride_cm \
+ offs_cn[None, :] * stride_cn
tl.store(pc, c, mask=offs_cn[None, :] < DS0)
def dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
if a.stride(2) != 1 and a.stride(3) != 1:
a = a.contiguous()
if b.stride(2) != 1 and b.stride(3) != 1:
b = b.contiguous()
# shapes / dtypes
AS1 = block * spdims[2 if trans_a else 1]
BS0 = b.size(0)
BS1 = b.size(1)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# allocate output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
if out is None:
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
else:
assert out.shape == (CS0, CS1, CS2, CS3)
c = out
# meta-parameter heuristics
TILE_N = 128
# compute output
grid = lambda meta: [triton.cdiv(BS3, meta['TILE_N']), width, BS0]
_dsd_kernel[grid](
a, b, c,
a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3),
BS3, AS1, lut,
TILE_M=block, TILE_N=TILE_N, TILE_K=min(block, 32), BLOCK=block, num_stages=4,
num_warps=4, GROUP_SIZE_M=4,
)
# exit()
return c
def dsd_lut(layout, block, step, trans, device):
"""
Generates the look-up table for incrementing pointers in the DSD/DDS matmul.
Example (BLOCK=32, STEP=16)
[[1, 0, 0, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 1, 0, 0]]
Then the offsets for A are
[0 , 16, 32, 48] <- row 0
\\----/ \\----/
col=0 col=3
[64, 80, 96, 112, 128, 144] <- row 1
\\----/ \\----/ \\------/
col=1 col=2 col=3
[160, 176, 192, 208]
which leads to increments table
[0, 16, 16, 16, || 64, 16, 16, 16, 16, 16, || 160, 16, 16, 16]
Because B is dense, the offsets are
[0, 16, 96, 112] <- row 0
[32, 48, 64, 80] <- row 1
[0, 16, 64, 80] <- row 2
"""
sizes = torch.sum(layout, 2 if trans else 1)
head_id, col_id = torch.ones_like(sizes).nonzero(as_tuple=True)
sizes = sizes.flatten()
segments = sizes * step
# pointer increments
if trans:
nnz = layout.nonzero(as_tuple=False)
else:
nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
num_blocks = nnz.size(0)
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
# -------------------------------
# dense input pointer increments
# -------------------------------
# Note that the inner loop matmul kernel may have a fixed step size (e.g., TILE_K)
# that is smaller than the block size, so we need to do a bit of extra work
# to handle this case
B_idx = nnz[:, 2] * block
B_incs = B_idx.clone()
B_incs[1:] -= B_idx[:-1]
div = block // step
B_incs = B_incs.view(-1, 1).repeat(1, div)
B_incs[:, 1:] = step
B_incs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
B_incs[offsets[segments > 0], 0] = B_idx[offsets[segments > 0]]
B_incs = B_incs.view(-1)
# -------------------------------
# sparse input pointer increments
# -------------------------------
# same as above, except that the increments are in the sparse memory layout
if trans:
A_idx = torch.arange(num_blocks, device=layout.device)
else:
A_idx = torch.tensor([], dtype=torch.int64, device=layout.device)
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone().long()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum, device=layout.device)
A_idx = torch.cat((A_idx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
A_incs = A_idx * block * block
A_incs[1:] -= A_idx[:-1] * block * block
A_incs = A_incs.view(-1, 1).repeat(1, div)
if trans:
A_incs[:, 1:] = step
A_incs[:, 0] -= (div - 1) * step
else:
A_incs[:, 1:] = step * block
A_incs[:, 0] -= (div - 1) * step * block
A_incs[offsets[segments > 0], 0] = A_idx[offsets[segments > 0]]
A_incs = A_incs.view(-1)
# create header
width = col_id.size(0)
offsets = offsets * 2 * div + 4 * width
segments = segments * div
header = torch.stack((offsets, segments, col_id, head_id), dim=1).view(-1).contiguous()
# create increments
incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
# pad by a factor 2*MAX_NUM_STAGES
    # to accommodate pre-fetching inside the kernel
pad = torch.zeros(20, device=incs.device, dtype=incs.dtype)
incs = torch.cat((incs, pad))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
return lut, width
# -----------------------------
# Dense = Dense x Sparse (DDS)
# -----------------------------
# AB = (B^T A^T)^T
def dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
return dsd_matmul(b, a, not trans_b, not trans_a, not trans_c, spdims, block, lut, width, out=out)
##############
# MAIN API #
##############
class _matmul(torch.autograd.Function):
fn = {'sdd': sdd_matmul, 'dsd': dsd_matmul, 'dds': dds_matmul}
@staticmethod
def forward(
ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block,
c_lut, c_width, da_lut, da_width, db_lut, db_width, out
):
c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_width, out=out)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.db_lut = db_lut
ctx.db_width = db_width
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
ctx.trans_c = trans_c
ctx.has_out = out is not None
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
da, db = None, None
mode = ctx.mode
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _matmul.fn[mode_da](
dc, b, ctx.trans_c, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_width,
)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _matmul.fn[mode_db](
a, dc, not ctx.trans_a, ctx.trans_c, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_width,
)
dout = dc if ctx.has_out else None
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, dout
class matmul:
def __init__(self, layout, block, mode, device, trans_a=False, trans_b=False, trans_c=False):
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
self.block = block
self.mode = mode
self.trans_a = trans_a
self.trans_b = trans_b
self.trans_c = trans_c
self.layout = layout
self.spdims = layout.shape
step = min(block, 32)
if self.mode == 'sdd':
self.c_lut, self.c_width = sdd_lut(layout, block, device)
self.da_lut, self.da_width = dsd_lut(layout, block, step, True, device)
self.db_lut, self.db_width = dsd_lut(layout, block, step, False, device)
if self.mode == 'dsd':
self.c_lut, self.c_width = dsd_lut(layout, block, step, not self.trans_a, device)
self.da_lut, self.da_width = sdd_lut(layout, block, device)
self.db_lut, self.db_width = dsd_lut(layout, block, step, self.trans_a, device)
if self.mode == 'dds':
self.c_lut, self.c_width = dsd_lut(layout, block, step, self.trans_b, device)
self.da_lut, self.da_width = dsd_lut(layout, block, step, not self.trans_b, device)
self.db_lut, self.db_width = sdd_lut(layout, block, device)
def __call__(self, a, b, out=None):
c = _matmul.apply(
a, b, self.trans_a, self.trans_b, self.trans_c, self.mode, self.spdims, self.block,
self.c_lut, self.c_width,
self.da_lut, self.da_width,
self.db_lut, self.db_width,
out
)
return c
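if __name__ == "__main__":
    # Illustrative usage sketch (an assumption for exposition): 'sdd' computes a
    # block-sparse output of two dense inputs; the boolean `layout` has one entry
    # per (head, block-row, block-col) and the result keeps only the non-zero blocks.
    Z, H, M, N, K, block = 2, 4, 256, 256, 256, 32
    layout = torch.randint(0, 2, (H, M // block, N // block))
    a = torch.randn(Z, H, M, K, device="cuda", dtype=torch.float16)
    b = torch.randn(Z, H, K, N, device="cuda", dtype=torch.float16)
    dot = matmul(layout, block, "sdd", device="cuda")
    c = dot(a, b)                        # shape (Z, layout.sum(), block, block)
    print(c.shape)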
| triton-master | python/triton/ops/blocksparse/matmul.py |
# flake8: noqa: F401
from .matmul import matmul
from .softmax import softmax
| triton-master | python/triton/ops/blocksparse/__init__.py |
import torch
import triton
import triton.language as tl
def num_warps(n):
if n <= 128:
return 1
if n <= 256:
return 2
if n <= 512:
return 4
if n <= 4096:
return 8
return 16
@triton.jit
def _blocksparse_softmax_fwd(
Out, A, stride_xz, LUT,
R, extent, stride_zr, stride_hr, # relative attention
scale, is_causal,
ROW_SIZE: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
IS_DENSE: tl.constexpr,
):
h = tl.program_id(0)
m = tl.program_id(1)
z = tl.program_id(2)
# create index ranges
hm = h * tl.num_programs(1) + m
lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE
block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE
# extract information from LUT
header = LUT + (hm // BLOCK_SIZE) * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# pointer offset
off_a = z * stride_xz
off_a += (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE # block indx
off_a += (m % BLOCK_SIZE) * BLOCK_SIZE # row indx
# do not need to read column indices in the dense case
if IS_DENSE:
ns = tl.arange(0, ROW_SIZE)
else:
off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE
start_n = tl.load(LUT + off_lut + block_n, mask=block_n < size, other=0)
ns = start_n * BLOCK_SIZE + lane_n
# load X
mask = block_n < size
a = tl.load(A + off_a + lane_n, mask=mask, other=-float("inf"))
a = a.to(tl.float32)
# compute
out = a
out *= scale
# apply relative attention
if R is not None:
R += z * stride_zr
R += h * stride_hr
off_lo = (extent - m - 1) + ns
mask_lo = (off_lo >= 0) & (off_lo < extent)
rel_logits = tl.load(R + m * extent + off_lo, mask=mask_lo, other=0.0)
out += rel_logits
out = out.to(tl.float32)
# apply causal mask
out = tl.where((ns > m) & is_causal, -float("inf"), out)
# computation
out = tl.softmax(out)
# write-back
tl.store(Out + off_a + lane_n, out, mask=mask)
@triton.jit
def _blocksparse_softmax_bwd(
DA, stride_zdx,
DOut, stride_zdout,
Out, stride_zout,
scale,
LUT,
DR, extent, stride_zr, stride_hr, stride_er,
is_causal,
ROW_SIZE: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
IS_DENSE: tl.constexpr,
):
h = tl.program_id(0)
m = tl.program_id(1)
z = tl.program_id(2)
# create index ranges
hm = h * tl.num_programs(1) + m
lane_n = tl.arange(0, ROW_SIZE) % BLOCK_SIZE
block_n = tl.arange(0, ROW_SIZE) // BLOCK_SIZE
# extract information from LUT
header = LUT + (hm // BLOCK_SIZE) * 2
size = tl.load(header + 0)
offset = tl.load(header + 1)
# row-col offset
off_mn = (offset + block_n) * BLOCK_SIZE * BLOCK_SIZE
off_mn += (m % BLOCK_SIZE) * BLOCK_SIZE
mask = block_n < size
# pointers
As = Out + z * stride_zout + off_mn
DOuts = DOut + z * stride_zdout + off_mn
# do not need to read column indices in the dense case
if IS_DENSE:
ns = tl.arange(0, ROW_SIZE)
else:
off_lut = offset + 2 * tl.num_programs(0) * tl.num_programs(1) // BLOCK_SIZE
start_n = tl.load(LUT + off_lut + block_n, mask=mask, other=0)
ns = start_n * BLOCK_SIZE + lane_n
# load data
a = tl.load(As + lane_n, mask=mask, other=0.0)
a = a.to(tl.float32)
dout = tl.load(DOuts + lane_n, mask=mask, other=0.0)
dout = dout.to(tl.float32)
# compute
da = a * (dout - tl.sum(a * dout, 0))
da = tl.where((ns > m) & is_causal, 0., da)
# apply relative attention
if DR is not None:
DR += z * stride_zr
DR += h * stride_hr
off_lo = (extent - m - 1) + ns
mask_lo = (off_lo >= 0) & (off_lo < extent) & mask
tl.store(DR + m * extent + off_lo, da, mask=mask_lo)
da = da * scale
# convert da
# write-back
DAs = DA + z * stride_zdx + off_mn
tl.store(DAs + lane_n, da, mask=mask)
class _softmax(torch.autograd.Function):
@staticmethod
def make_lut(layout, block, device):
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
sizes = _empty.clone()
# sizes along rows
for h in range(layout.shape[0]):
sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
total_sizes = sizes * block
# offsets in block format
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
# block indices
columns = layout.nonzero(as_tuple=False)[:, 2]
header = torch.stack((sizes, offsets), dim=1).view(-1)
lut = torch.cat((header, columns)).type(torch.int32).to(device)
return lut, int(total_sizes.max())
@staticmethod
def forward(
ctx, a, scale, rel_logits, is_causal,
spdims, block, lut, maxlut, is_dense
):
if scale is not None and isinstance(scale, torch.Tensor):
assert scale.device.type == "cpu"
scale = scale.item()
M = a.shape[0]
grid = [spdims[0], spdims[1] * block, M]
rel_shape = (1, 1, 1, 1) if rel_logits is None else rel_logits.shape
rel_strides = (1, 1, 1, 1) if rel_logits is None else rel_logits.stride()
# enqueue kernel
out = torch.empty_like(a)
_blocksparse_softmax_fwd[grid](
out, a, a.stride(0), lut,
rel_logits, rel_shape[-1], rel_strides[0], rel_strides[1], # relative attn
scale,
is_causal,
BLOCK_SIZE=block,
ROW_SIZE=triton.next_power_of_2(maxlut),
IS_DENSE=is_dense,
num_warps=num_warps(maxlut)
)
# save to context
# ctx.mark_dirty(x)
ctx.save_for_backward(out, lut)
ctx.spdims = spdims
ctx.block = block
ctx.maxlut = maxlut
ctx.scale = scale
ctx.rel_shape = rel_shape
ctx.rel_strides = rel_strides
ctx.rel_dtype = a.dtype
ctx.is_dense = is_dense
ctx.is_causal = is_causal
return out
@staticmethod
def backward(ctx, dout):
# retrieve from context
out, lut = ctx.saved_tensors
# relative logits gradients
dr = None
if ctx.needs_input_grad[3]:
dr = torch.zeros(ctx.rel_shape, dtype=ctx.rel_dtype, device=out.device)
# run kernel
M = out.shape[0]
grid = (ctx.spdims[0], ctx.spdims[1] * ctx.block, M)
da = torch.empty_like(dout)
_blocksparse_softmax_bwd[grid](
da, da.stride(0),
dout, dout.stride(0),
out, out.stride(0),
ctx.scale,
lut,
dr, ctx.rel_shape[-1], ctx.rel_strides[0], ctx.rel_strides[1], ctx.rel_strides[2],
ctx.is_causal,
BLOCK_SIZE=ctx.block,
ROW_SIZE=triton.next_power_of_2(ctx.maxlut),
IS_DENSE=ctx.is_dense,
num_warps=num_warps(ctx.maxlut)
)
return (da, None, None, dr, None,
None, None, None, None, None,
None,
None, None, None,
None,
None, None, None
)
class softmax:
def __init__(self, layout, block, device, is_dense=False):
self.spdims = layout.shape
self.layout = layout
self.block = block
self.lut, self.maxlut = _softmax.make_lut(self.layout, self.block, device)
self.is_dense = is_dense
def __call__(self, a, *, scale=1.0, rel_logits=None, is_causal=False):
if rel_logits is not None and rel_logits.dtype != a.dtype:
raise ValueError("relative position embedding must be %s" % a.dtype)
a = _softmax.apply(
a, scale, rel_logits, is_causal,
self.spdims, self.block, self.lut, self.maxlut, self.is_dense,
)
return a
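if __name__ == "__main__":
    # Illustrative usage sketch (an assumption for exposition): the operator acts
    # on activations already stored block-sparsely, i.e. shape
    # (batch, nnz_blocks, block, block) -- typically the output of the 'sdd'
    # block-sparse matmul -- and normalizes each logical row of the layout.
    H, S, block = 4, 256, 16
    layout = torch.randint(0, 2, (H, S // block, S // block))
    sparse_softmax = softmax(layout, block, device="cuda")
    a = torch.randn(2, int(layout.sum()), block, block, device="cuda", dtype=torch.float16)
    p = sparse_softmax(a, scale=0.5, is_causal=True)
    print(p.shape)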
| triton-master | python/triton/ops/blocksparse/softmax.py |
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention algorithm
(see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf)
"""
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
    TMP, L, M, # NOTE: TMP is a scratchpad buffer to work around a compiler bug
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on,
Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
offs_d = tl.arange(0, BLOCK_DMODEL)
off_q = off_hz * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk
off_k = off_hz * stride_qh + offs_n[:, None] * stride_kn + offs_d[None, :] * stride_kk
off_v = off_hz * stride_qh + offs_n[:, None] * stride_qm + offs_d[None, :] * stride_qk
# Initialize pointers to Q, K, V
q_ptrs = Q + off_q
k_ptrs = K + off_k
v_ptrs = V + off_v
# initialize pointer to m and l
t_ptrs = TMP + off_hz * N_CTX + offs_m
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# load q: it will stay in SRAM throughout
q = tl.load(q_ptrs)
# loop over k, v and update accumulator
for start_n in range(0, (start_m + 1) * BLOCK_M, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(k_ptrs + start_n * stride_kn)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k, trans_b=True)
qk *= sm_scale
qk += tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), 0, float("-inf"))
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.exp(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.exp(m_i - m_i_new)
beta = tl.exp(m_ij - m_i_new)
l_i_new = alpha * l_i + beta * l_ij
# -- update output accumulator --
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new * alpha
tl.store(t_ptrs, acc_scale)
acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(v_ptrs + start_n * stride_vk)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# rematerialize offsets to save registers
start_m = tl.program_id(0)
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
# write back l and m
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(l_ptrs, l_i)
tl.store(m_ptrs, m_i)
# initialize pointers to output
offs_n = tl.arange(0, BLOCK_DMODEL)
off_o = off_hz * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on
out_ptrs = Out + off_o
tl.store(out_ptrs, acc)
@triton.jit
def _bwd_preprocess(
Out, DO, L,
NewDO, Delta,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = tl.arange(0, D_HEAD)
# load
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
denom = tl.load(L + off_m).to(tl.float32)
# compute
do = do / denom[:, None]
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do)
tl.store(Delta + off_m, delta)
@triton.jit
def _bwd_kernel(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
L, M,
D,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
Z, H, N_CTX,
num_block,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
# offset pointers for batch/head
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_qz + off_h * stride_qh
V += off_z * stride_qz + off_h * stride_qh
DO += off_z * stride_qz + off_h * stride_qh
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
lo = start_n * BLOCK_M
# initialize row/col offsets
offs_qm = lo + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
offs_k = tl.arange(0, BLOCK_DMODEL)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX
m_ptrs = M + off_hz * N_CTX
# initialize dv amd dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# k and v stay in SRAM throughout
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
# loop over rows
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
q = tl.load(q_ptrs)
# recompute p = softmax(qk, dim=-1).T
# NOTE: `do` is pre-divided by `l`; no normalization here
qk = tl.dot(q, k, trans_b=True)
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf"))
m = tl.load(m_ptrs + offs_m_curr)
p = tl.exp(qk * sm_scale - m[:, None])
# compute dv
do = tl.load(do_ptrs)
dv += tl.dot(p.to(tl.float16), do, trans_a=True)
# compute dp = dot(v, do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, v, trans_b=True)
# compute ds = p * (dp - delta[:, None])
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(ds.to(tl.float16), q, trans_a=True)
# # compute dq
dq = tl.load(dq_ptrs, eviction_policy="evict_last")
dq += tl.dot(ds.to(tl.float16), k)
tl.store(dq_ptrs, dq, eviction_policy="evict_last")
# # increment pointers
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_qm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, sm_scale):
BLOCK = 128
# shape constraints
Lq, Lk = q.shape[-1], k.shape[-1]
assert Lq == Lk
o = torch.empty_like(q)
grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1])
tmp = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_fwd_kernel[grid](
q, k, v, sm_scale,
tmp, L, m,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q.shape[0], q.shape[1], q.shape[2],
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=64, num_warps=4,
num_stages=1,
)
ctx.save_for_backward(q, k, v, o, L, m)
ctx.BLOCK = BLOCK
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = 64
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, l, m = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
do_scaled = torch.empty_like(do)
delta = torch.empty_like(l)
_bwd_preprocess[(ctx.grid[0] * ctx.grid[1], )](
o, do, l,
do_scaled, delta,
BLOCK_M=ctx.BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
_bwd_kernel[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do_scaled,
dq, dk, dv,
l, m,
delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2],
ctx.grid[0],
BLOCK_M=ctx.BLOCK, BLOCK_N=ctx.BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
num_stages=1,
)
return dq, dk, dv, None
attention = _attention.apply
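# Illustrative usage sketch (for exposition; the pytest case below exercises the
# same path): q, k, v are (Z, H, N_CTX, D_HEAD) float16 CUDA tensors with
# D_HEAD == 64, and the kernel computes causal attention with an online softmax,
# so the full (N_CTX, N_CTX) score matrix never hits DRAM:
#
#   out = attention(q, k, v, sm_scale)   # same shape as q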
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(3, 2, 2048, 64)])
def test_op(Z, H, N_CTX, D_HEAD, dtype=torch.float16):
torch.manual_seed(20)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_()
sm_scale = 0.3
dout = torch.randn_like(q)
# reference implementation
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
for z in range(Z):
for h in range(H):
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, sm_scale)
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
triton.testing.assert_almost_equal(ref_out, tri_out)
triton.testing.assert_almost_equal(ref_dv, tri_dv)
triton.testing.assert_almost_equal(ref_dk, tri_dk)
triton.testing.assert_almost_equal(ref_dq, tri_dq)
try:
from flash_attn.flash_attn_interface import flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
# vary seq length for fixed head and batch=4
configs = [triton.testing.Benchmark(
x_names=['N_CTX'],
x_vals=[2**i for i in range(10, 16)],
line_arg='provider',
line_vals=['triton'] + (['flash'] if HAS_FLASH else []),
line_names=['Triton'] + (['Flash'] if HAS_FLASH else []),
styles=[('red', '-'), ('blue', '-')],
ylabel='ms',
plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}',
args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.float16, 'mode': mode}
) for mode in ['bwd']]
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, D_HEAD, mode, provider, dtype=torch.float16, device="cuda"):
assert mode in ['fwd', 'bwd']
warmup = 25
rep = 100
if provider == "triton":
q = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
sm_scale = 1.3
fn = lambda: attention(q, k, v, sm_scale)
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, percentiles=None, warmup=warmup, rep=rep)
return ms
if provider == "flash":
lengths = torch.full((BATCH,), fill_value=N_CTX, device=device)
cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32)
cu_seqlens[1:] = lengths.cumsum(0)
qkv = torch.randn((BATCH * N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=True)
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, percentiles=None, warmup=warmup, rep=rep)
return ms
# only works on A100 at the moment
# bench_flash_attention.run(save_path='.', print_data=True)
| triton-master | python/tutorials/06-fused-attention.py |
"""
Fused Softmax
=================
In this tutorial, you will write a fused softmax operation that is significantly faster
than PyTorch's native op for a particular class of matrices: those whose rows can fit in
the GPU's SRAM.
You will learn about:
- The benefits of kernel fusion for bandwidth-bound operations.
- Reduction operators in Triton.
"""
# %%
# Motivations
# ------------
# Custom GPU kernels for elementwise additions are educationally valuable but won't get you very far in practice.
# Let us consider instead the case of a simple (numerically stabilized) softmax operation:
import torch
import triton
import triton.language as tl
@torch.jit.script
def naive_softmax(x):
"""Compute row-wise softmax of X using native pytorch
We subtract the maximum element in order to avoid overflows. Softmax is invariant to
this shift.
"""
# read MN elements ; write M elements
x_max = x.max(dim=1)[0]
# read MN + M elements ; write MN elements
z = x - x_max[:, None]
# read MN elements ; write MN elements
numerator = torch.exp(z)
# read MN elements ; write M elements
denominator = numerator.sum(dim=1)
# read MN + M elements ; write MN elements
ret = numerator / denominator[:, None]
# in total: read 5MN + 2M elements ; wrote 3MN + 2M elements
return ret
# %%
# When implemented naively in PyTorch, computing :code:`y = naive_softmax(x)` for :math:`x \in R^{M \times N}`
# requires reading :math:`5MN + 2M` elements from DRAM and writing back :math:`3MN + 2M` elements.
# This is obviously wasteful; we'd prefer to have a custom "fused" kernel that only reads
# X once and does all the necessary computations on-chip.
# Doing so would require reading and writing back only :math:`MN` elements, so we could
# expect a theoretical speed-up of ~4x (i.e., :math:`(8MN + 4M) / 2MN`).
# The `torch.jit.script` decorator aims to perform this kind of "kernel fusion" automatically
# but, as we will see later, it is still far from ideal.
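# As a quick sanity check of the memory-traffic argument above, with assumed sizes M = 4096 and N = 2048:
_M, _N = 4096, 2048
_naive_traffic = (5 * _M * _N + 2 * _M) + (3 * _M * _N + 2 * _M)  # elements read + written by naive_softmax
_fused_traffic = 2 * _M * _N                                      # fused kernel: read X once, write Y once
assert 4.0 < _naive_traffic / _fused_traffic < 4.01               # i.e. the ~4x figure quoted above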
# %%
# Compute Kernel
# ----------------
# Our softmax kernel works as follows: each program loads a row of the input matrix X,
# normalizes it and writes back the result to the output Y.
# Note that one important limitation of Triton is that each block must have a
# power-of-two number of elements, so we need to internally "pad" each row and guard the
# memory operations properly if we want to handle any possible input shapes:
@triton.jit
def softmax_kernel(
output_ptr, input_ptr, input_row_stride, output_row_stride, n_cols,
BLOCK_SIZE: tl.constexpr
):
# The rows of the softmax are independent, so we parallelize across those
row_idx = tl.program_id(0)
# The stride represents how much we need to increase the pointer to advance 1 row
row_start_ptr = input_ptr + row_idx * input_row_stride
# The block size is the next power of two greater than n_cols, so we can fit each
# row in a single block
col_offsets = tl.arange(0, BLOCK_SIZE)
input_ptrs = row_start_ptr + col_offsets
# Load the row into SRAM, using a mask since BLOCK_SIZE may be > than n_cols
row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf'))
    # Subtract the maximum for numerical stability
row_minus_max = row - tl.max(row, axis=0)
# Note that exponentials in Triton are fast but approximate (i.e., think __expf in CUDA)
numerator = tl.exp(row_minus_max)
denominator = tl.sum(numerator, axis=0)
softmax_output = numerator / denominator
# Write back output to DRAM
output_row_start_ptr = output_ptr + row_idx * output_row_stride
output_ptrs = output_row_start_ptr + col_offsets
tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols)
# %%
# We can create a helper function that enqueues the kernel and its (meta-)arguments for any given input tensor.
def softmax(x):
n_rows, n_cols = x.shape
# The block size is the smallest power of two greater than the number of columns in `x`
BLOCK_SIZE = triton.next_power_of_2(n_cols)
# Another trick we can use is to ask the compiler to use more threads per row by
# increasing the number of warps (`num_warps`) over which each row is distributed.
# You will see in the next tutorial how to auto-tune this value in a more natural
# way so you don't have to come up with manual heuristics yourself.
num_warps = 4
if BLOCK_SIZE >= 2048:
num_warps = 8
if BLOCK_SIZE >= 4096:
num_warps = 16
# Allocate output
y = torch.empty_like(x)
    # Enqueue kernel. The 1D launch grid is simple: we have one kernel instance per row
    # of the input matrix
softmax_kernel[(n_rows,)](
y,
x,
x.stride(0),
y.stride(0),
n_cols,
num_warps=num_warps,
BLOCK_SIZE=BLOCK_SIZE,
)
return y
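# As a concrete example of the padding logic: for the 781-column matrix used in the unit test below,
# `triton.next_power_of_2(781)` is 1024, so each program loads a 1024-wide block and masks off the
# 243 out-of-range columns with `-inf`.
assert triton.next_power_of_2(781) == 1024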
# %%
# Unit Test
# ----------
# %%
# We make sure that we test our kernel on a matrix with an irregular number of rows and columns.
# This will allow us to verify that our padding mechanism works.
torch.manual_seed(0)
x = torch.randn(1823, 781, device='cuda')
y_triton = softmax(x)
y_torch = torch.softmax(x, axis=1)
assert torch.allclose(y_triton, y_torch), (y_triton, y_torch)
# %%
# As expected, the results are identical.
# %%
# Benchmark
# -------------
# Here we will benchmark our operation as a function of the number of columns in the input matrix -- assuming 4096 rows.
# We will then compare its performance against (1) :code:`torch.softmax` and (2) the :code:`naive_softmax` defined above.
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'], # argument names to use as an x-axis for the plot
x_vals=[
128 * i for i in range(2, 100)
], # different possible values for `x_name`
line_arg='provider', # argument name whose value corresponds to a different line in the plot
line_vals=[
'triton',
'torch-native',
'torch-jit',
], # possible values for `line_arg``
line_names=[
"Triton",
"Torch (native)",
"Torch (jit)",
], # label name for the lines
styles=[('blue', '-'), ('green', '-'), ('green', '--')], # line styles
ylabel="GB/s", # label name for the y-axis
plot_name="softmax-performance", # name for the plot. Used also as a file name for saving the plot.
args={'M': 4096}, # values for function arguments not in `x_names` and `y_name`
)
)
def benchmark(M, N, provider):
x = torch.randn(M, N, device='cuda', dtype=torch.float32)
if provider == 'torch-native':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.softmax(x, axis=-1))
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: softmax(x))
if provider == 'torch-jit':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: naive_softmax(x))
gbps = lambda ms: 2 * x.nelement() * x.element_size() * 1e-9 / (ms * 1e-3)
return gbps(ms), gbps(max_ms), gbps(min_ms)
benchmark.run(show_plots=True, print_data=True)
# %%
# In the above plot, we can see that:
#
# - Triton is 4x faster than the Torch JIT. This confirms our suspicions that the Torch JIT does not do any fusion here.
# - Triton is noticeably faster than :code:`torch.softmax` -- in addition to being **easier to read, understand and maintain**.
# Note, however, that the PyTorch `softmax` operation is more general and will work on tensors of any shape.
| triton-master | python/tutorials/02-fused-softmax.py |
"""
Vector Addition
=================
In this tutorial, you will write a simple vector addition using Triton and learn about:
- The basic programming model of Triton
- The `triton.jit` decorator, which is used to define Triton kernels.
- The best practices for validating and benchmarking your custom ops against native reference implementations
"""
# %%
# Compute Kernel
# --------------------------
import torch
import triton
import triton.language as tl
@triton.jit
def add_kernel(
x_ptr, # *Pointer* to first input vector
y_ptr, # *Pointer* to second input vector
output_ptr, # *Pointer* to output vector
n_elements, # Size of the vector
BLOCK_SIZE: tl.constexpr, # Number of elements each program should process
# NOTE: `constexpr` so it can be used as a shape value
):
# There are multiple 'program's processing different data. We identify which program
# we are here
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0
# This program will process inputs that are offset from the initial data.
# for instance, if you had a vector of length 256 and block_size of 64, the programs
# would each access the elements [0:64, 64:128, 128:192, 192:256].
# Note that offsets is a list of pointers
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# Create a mask to guard memory operations against out-of-bounds accesses
mask = offsets < n_elements
# Load x and y from DRAM, masking out any extra elements in case the input is not a
# multiple of the block size
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
# Write x + y back to DRAM
tl.store(output_ptr + offsets, output, mask=mask)
# %%
# Let's also declare a helper function to (1) allocate the `z` tensor
# and (2) enqueue the above kernel with appropriate grid/block sizes.
def add(x: torch.Tensor, y: torch.Tensor):
# We need to preallocate the output
output = torch.empty_like(x)
assert x.is_cuda and y.is_cuda and output.is_cuda
n_elements = output.numel()
# The SPMD launch grid denotes the number of kernel instances that run in parallel.
# It is analogous to CUDA launch grids. It can be either Tuple[int], or Callable(metaparameters) -> Tuple[int]
# In this case, we use a 1D grid where the size is the number of blocks
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
# NOTE:
# - each torch.tensor object is implicitly converted into a pointer to its first element.
# - `triton.jit`'ed functions can be index with a launch grid to obtain a callable GPU kernel
# - don't forget to pass meta-parameters as keywords arguments
add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
# We return a handle to z but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still
# running asynchronously at this point.
return output
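# As a concrete illustration: for the size used below (98432 elements) and BLOCK_SIZE=1024, the launch
# grid contains triton.cdiv(98432, 1024) = 97 program instances; the last one covers only 128 in-range
# elements and relies on `mask` above for the rest of its block.
assert triton.cdiv(98432, 1024) == 97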
# %%
# We can now use the above function to compute the element-wise sum of two `torch.tensor` objects and test its correctness:
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
y = torch.rand(size, device='cuda')
output_torch = x + y
output_triton = add(x, y)
print(output_torch)
print(output_triton)
print(
f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}'
)
# %%
# Seems like we're good to go!
# %%
# Benchmark
# -----------
# We can now benchmark our custom op on vectors of increasing sizes to get a sense of how it does relative to PyTorch.
# To make things easier, Triton has a set of built-in utilities that allow us to concisely plot the performance of your custom ops
# for different problem sizes.
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['size'], # argument names to use as an x-axis for the plot
x_vals=[
2 ** i for i in range(12, 28, 1)
], # different possible values for `x_name`
x_log=True, # x axis is logarithmic
line_arg='provider', # argument name whose value corresponds to a different line in the plot
line_vals=['triton', 'torch'], # possible values for `line_arg`
line_names=['Triton', 'Torch'], # label name for the lines
styles=[('blue', '-'), ('green', '-')], # line styles
ylabel='GB/s', # label name for the y-axis
plot_name='vector-add-performance', # name for the plot. Used also as a file name for saving the plot.
args={}, # values for function arguments not in `x_names` and `y_name`
)
)
def benchmark(size, provider):
x = torch.rand(size, device='cuda', dtype=torch.float32)
y = torch.rand(size, device='cuda', dtype=torch.float32)
if provider == 'torch':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: x + y)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: add(x, y))
gbps = lambda ms: 12 * size / ms * 1e-6
return gbps(ms), gbps(max_ms), gbps(min_ms)
# %%
# We can now run the decorated function above. Pass `print_data=True` to see the performance number, `show_plots=True` to plot them, and/or
# `save_path='/path/to/results/' to save them to disk along with raw CSV data
benchmark.run(print_data=True, show_plots=True)
| triton-master | python/tutorials/01-vector-add.py |
"""
Libdevice function
==================
Triton can invoke a custom function from an external library.
In this example, we will use the `libdevice` library to apply `asin` on a tensor.
Please refer to https://docs.nvidia.com/cuda/libdevice-users-guide/index.html regarding the semantics of all available libdevice functions.
In `triton/language/libdevice.py`, we try to aggregate functions with the same computation but different data types together.
For example, both `__nv_asin` and `__nv_asinf` calculate the principal value of the arc sine of the input, but `__nv_asin` operates on `double` and `__nv_asinf` operates on `float`.
Using Triton, you can simply call `tl.libdevice.asin`.
Triton automatically selects the correct underlying device function to invoke based on input and output types.
"""
# %%
# asin Kernel
# --------------------------
import torch
import triton
import triton.language as tl
@triton.jit
def asin_kernel(
x_ptr,
y_ptr,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
x = tl.libdevice.asin(x)
tl.store(y_ptr + offsets, x, mask=mask)
# %%
# Using the default libdevice library path
# --------------------------
# We can use the default libdevice library path encoded in `triton/language/libdevice.py`
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
output_triton = torch.zeros(size, device='cuda')
output_torch = torch.asin(x)
assert x.is_cuda and output_triton.is_cuda
n_elements = output_torch.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
asin_kernel[grid](x, output_triton, n_elements, BLOCK_SIZE=1024)
print(output_torch)
print(output_triton)
print(
f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}'
)
# %%
# Customize the libdevice library path
# --------------------------
# We can also customize the libdevice library path by passing the path to the `libdevice` library to the `asin` kernel.
output_triton = torch.empty_like(x)
asin_kernel[grid](x, output_triton, n_elements, BLOCK_SIZE=1024,
extern_libs={'libdevice': '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc'})
print(output_torch)
print(output_triton)
print(
f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}'
)
| triton-master | python/tutorials/07-libdevice-function.py |
"""
Low-Memory Dropout
=================
In this tutorial, you will write a memory-efficient implementation of dropout whose state
will be composed of a single int32 seed. This differs from more traditional implementations of dropout,
whose state is generally composed of a bit mask tensor of the same shape as the input. You will learn about:
- The limitations of naive implementations of Dropout with PyTorch
- Parallel pseudo-random number generation in Triton
"""
# %%
# Baseline
# -------------
# The *dropout* operator was first introduced in [SRIVASTAVA2014]_ as a way to improve the performance
# of deep neural networks in low-data regime (i.e. regularization).
#
# It takes a vector as input and produces a vector of the same shape as output. Each scalar in the
# output has a probability :math:`p` of being changed to zero and otherwise it is copied from the input.
# This forces the network to perform well even when only :math:`1 - p` scalars from the input are available.
#
# At evaluation time we want to use the full power of the network so we set :math:`p=0`. Naively this would
# increase the norm of the output (which can be a bad thing, e.g. it can lead to artificial decrease
# in the output softmax temperature). To prevent this we multiply the output by :math:`\frac{1}{1 - p}`, which
# keeps the norm consistent regardless of the dropout probability.
#
# Let's first take a look at the baseline implementation.
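# As a quick numeric check of the :math:`\frac{1}{1 - p}` rescaling described above: a kept element x
# becomes x / (1 - p) with probability (1 - p) and 0 otherwise, so E[output] = (1 - p) * x / (1 - p) + p * 0 = x.
_p, _x = 0.5, 3.0
assert (1 - _p) * (_x / (1 - _p)) + _p * 0.0 == _x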
import tabulate
import torch
import triton
import triton.language as tl
@triton.jit
def _dropout(
x_ptr, # pointer to the input
x_keep_ptr, # pointer to a mask of 0s and 1s
output_ptr, # pointer to the output
n_elements, # number of elements in the `x` tensor
p, # probability that an element of `x` is changed to zero
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# Load data
x = tl.load(x_ptr + offsets, mask=mask)
x_keep = tl.load(x_keep_ptr + offsets, mask=mask)
# The line below is the crucial part, described in the paragraph above!
output = tl.where(x_keep, x / (1 - p), 0.0)
# Write-back output
tl.store(output_ptr + offsets, output, mask=mask)
def dropout(x, x_keep, p):
output = torch.empty_like(x)
assert x.is_contiguous()
n_elements = x.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_dropout[grid](x, x_keep, output, n_elements, p, BLOCK_SIZE=1024)
return output
# Input tensor
x = torch.randn(size=(10,)).cuda()
# Dropout mask
p = 0.5
x_keep = (torch.rand(size=(10,)) > p).to(torch.int32).cuda()
#
output = dropout(x, x_keep=x_keep, p=p)
print(tabulate.tabulate([
["input"] + x.tolist(),
["keep mask"] + x_keep.tolist(),
["output"] + output.tolist()
]))
# %%
# Seeded dropout
# -------------
# The above implementation of dropout works fine, but it can be a bit awkward to deal with. Firstly,
# we need to store the dropout mask for backpropagation. Secondly, dropout state management can get
# very tricky when using recompute/checkpointing (e.g. see all the notes about `preserve_rng_state` in
# https://pytorch.org/docs/1.9.0/checkpoint.html). In this tutorial we'll describe an alternative implementation
# that (1) has a smaller memory footprint; (2) requires less data movement; and (3) simplifies the management
# of persisting randomness across multiple invocations of the kernel.
#
# Pseudorandom number generation in Triton is simple! In this tutorial we will use the
# :code:`triton.language.rand` function which generates a block of uniformly distributed :code:`float32`
# values in [0, 1), given a seed and a block of :code:`int32` offsets. But if you need it, Triton also provides
# other :ref:`random number generation strategies <Random Number Generation>`.
#
# .. note::
# Triton's implementation of PRNG is based on the Philox algorithm (described on [SALMON2011]_).
#
# Let's put it all together.
@triton.jit
def _seeded_dropout(
x_ptr,
output_ptr,
n_elements,
p,
seed,
BLOCK_SIZE: tl.constexpr,
):
# compute memory offsets of elements handled by this instance
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# load data from x
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
# randomly prune it
random = tl.rand(seed, offsets)
x_keep = random > p
# write-back
output = tl.where(x_keep, x / (1 - p), 0.0)
tl.store(output_ptr + offsets, output, mask=mask)
def seeded_dropout(x, p, seed):
output = torch.empty_like(x)
assert x.is_contiguous()
n_elements = x.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_seeded_dropout[grid](x, output, n_elements, p, seed, BLOCK_SIZE=1024)
return output
x = torch.randn(size=(10,)).cuda()
# Compare this to the baseline - dropout mask is never instantiated!
output = seeded_dropout(x, p=0.5, seed=123)
output2 = seeded_dropout(x, p=0.5, seed=123)
output3 = seeded_dropout(x, p=0.5, seed=512)
print(tabulate.tabulate([
["input"] + x.tolist(),
["output (seed = 123)"] + output.tolist(),
["output (seed = 123)"] + output2.tolist(),
["output (seed = 512)"] + output3.tolist()
]))
# %%
# Et Voilà! We have a triton kernel that applies the same dropout mask provided the seed is the same!
# If you'd like explore further applications of pseudorandomness in GPU programming, we encourage you
# to explore the `triton/language/random` folder!
# %%
# Exercises
# -------------
# 1. Extend the kernel to operate over a matrix and use a vector of seeds - one per row.
# 2. Add support for striding.
# 3. (challenge) Implement a kernel for a sparse Johnson-Lindenstrauss transform which generates the projection matrix on the fly each time using a seed.
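# A possible sketch of exercise 1, assuming one program per row, one int32 seed per row, and a
# contiguous (row-major) matrix; the kernel name and signature below are illustrative only, not a
# reference solution.
@triton.jit
def _seeded_dropout_rowwise(
    x_ptr,
    output_ptr,
    seeds_ptr,
    n_cols,
    p,
    BLOCK_SIZE: tl.constexpr,
):
    row = tl.program_id(axis=0)
    seed = tl.load(seeds_ptr + row)                          # per-row seed
    offsets = tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_cols
    x = tl.load(x_ptr + row * n_cols + offsets, mask=mask)
    random = tl.rand(seed, offsets)                          # same offsets, different seed per row
    x_keep = random > p
    output = tl.where(x_keep, x / (1 - p), 0.0)
    tl.store(output_ptr + row * n_cols + offsets, output, mask=mask)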
# %%
# References
# --------------
#
# .. [SALMON2011] John K. Salmon, Mark A. Moraes, Ron O. Dror, and David E. Shaw, "Parallel Random Numbers: As Easy as 1, 2, 3", 2011
# .. [SRIVASTAVA2014] Nitish Srivastava and Geoffrey Hinton and Alex Krizhevsky and Ilya Sutskever and Ruslan Salakhutdinov, "Dropout: A Simple Way to Prevent Neural Networks from Overfitting", JMLR 2014
| triton-master | python/tutorials/04-low-memory-dropout.py |
"""
Matrix Multiplication
======================
In this tutorial, you will write a 25-line high-performance FP16 matrix multiplication
kernel that achieves performance on par with cuBLAS.
You will specifically learn about:
- Block-level matrix multiplications
- Multi-dimensional pointer arithmetic
- Program re-ordering for improved L2 cache hit rate
- Automatic performance tuning
"""
# %%
# Motivations
# -------------
# Matrix multiplications are a key building block of most modern high-performance computing systems.
# They are notoriously hard to optimize, hence their implementation is generally done by
# hardware vendors themselves as part of so-called "kernel libraries" (e.g., cuBLAS).
# Unfortunately, these libraries are often proprietary and cannot be easily customized
# to accommodate the needs of modern deep learning workloads (e.g., fused activation functions).
# In this tutorial, you will learn how to implement efficient matrix multiplications by
# yourself with Triton, in a way that is easy to customize and extend.
#
# Roughly speaking, the kernel that we will write will implement the following blocked
# algorithm to multiply a (M, K) by a (K, N) matrix:
#
# .. code-block:: python
#
# # do in parallel
# for m in range(0, M, BLOCK_SIZE_M):
# # do in parallel
# for n in range(0, N, BLOCK_SIZE_N):
# acc = zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=float32)
# for k in range(0, K, BLOCK_SIZE_K):
# a = A[m : m+BLOCK_SIZE_M, k : k+BLOCK_SIZE_K]
# b = B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]
# acc += dot(a, b)
# C[m : m+BLOCK_SIZE_M, n : n+BLOCK_SIZE_N] = acc;
#
# where each iteration of the doubly-nested for-loop is performed by a dedicated Triton program instance.
# %%
# Compute Kernel
# ----------------
#
# The above algorithm is, actually, fairly straightforward to implement in Triton.
# The main difficulty comes from the computation of the memory locations at which blocks
# of :code:`A` and :code:`B` must be read in the inner loop. For that, we need
# multi-dimensional pointer arithmetics.
#
# Pointer Arithmetics
# ~~~~~~~~~~~~~~~~~~~~
#
# For a row-major 2D tensor :code:`X`, the memory location of :code:`X[i, j]` is given
# by :code:`&X[i, j] = X + i*stride_xi + j*stride_xj`.
# Therefore, blocks of pointers for :code:`A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K]` and
# :code:`B[k : k+BLOCK_SIZE_K, n : n+BLOCK_SIZE_N]` can be defined in pseudo-code as:
#
# .. code-block:: python
#
# &A[m : m+BLOCK_SIZE_M, k:k+BLOCK_SIZE_K] = a_ptr + (m : m+BLOCK_SIZE_M)[:, None]*A.stride(0) + (k : k+BLOCK_SIZE_K)[None, :]*A.stride(1);
# &B[k : k+BLOCK_SIZE_K, n:n+BLOCK_SIZE_N] = b_ptr + (k : k+BLOCK_SIZE_K)[:, None]*B.stride(0) + (n : n+BLOCK_SIZE_N)[None, :]*B.stride(1);
#
# Which means that pointers for blocks of A and B can be initialized (i.e., :code:`k=0`) in Triton as:
#
# .. code-block:: python
#
# offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
# offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
# offs_k = tl.arange(0, BLOCK_SIZE_K)
# a_ptrs = a_ptr + (offs_am[:, None]*stride_am + offs_k [None, :]*stride_ak)
# b_ptrs = b_ptr + (offs_k [:, None]*stride_bk + offs_bn[None, :]*stride_bn)
#
# And then updated in the inner loop as follows:
#
# .. code-block:: python
#
# pa += BLOCK_SIZE_K * stride_ak;
# pb += BLOCK_SIZE_K * stride_bk;
#
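# As a quick sanity check of the address formula above (toy values assumed), the same
# arithmetic can be reproduced with a flat row-major buffer in plain Python:
#
# .. code-block:: python
#
#     rows, cols = 3, 4
#     x_flat = list(range(rows * cols))                   # row-major storage of a 3x4 matrix
#     stride_i, stride_j = cols, 1
#     assert x_flat[1 * stride_i + 2 * stride_j] == 1 * cols + 2   # element X[1, 2]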
#
# L2 Cache Optimizations
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# As mentioned above, each program instance computes a :code:`[BLOCK_SIZE_M, BLOCK_SIZE_N]`
# block of :code:`C`.
# It is important to remember that the order in which these blocks are computed does
# matter, since it affects the L2 cache hit rate of our program. And unfortunately, a
# simple row-major ordering
#
# .. code-block:: Python
#
# pid = triton.program_id(0);
# grid_m = (M + BLOCK_SIZE_M - 1) // BLOCK_SIZE_M;
# grid_n = (N + BLOCK_SIZE_N - 1) // BLOCK_SIZE_N;
# pid_m = pid / grid_n;
# pid_n = pid % grid_n;
#
# is just not going to cut it.
#
# One possible solution is to launch blocks in an order that promotes data reuse.
# This can be done by 'super-grouping' blocks in groups of :code:`GROUP_M` rows before
# switching to the next column:
#
# .. code-block:: python
#
# # program ID
# pid = tl.program_id(axis=0)
# # number of program ids along the M axis
# num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
# # number of programs ids along the N axis
# num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
# # number of programs in group
# num_pid_in_group = GROUP_SIZE_M * num_pid_n
# # id of the group this program is in
# group_id = pid // num_pid_in_group
# # row-id of the first program in the group
# first_pid_m = group_id * GROUP_SIZE_M
# # if `num_pid_m` isn't divisible by `GROUP_SIZE_M`, the last group is smaller
# group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
# # *within groups*, programs are ordered in a column-major order
# # row-id of the program in the *launch grid*
# pid_m = first_pid_m + (pid % group_size_m)
# # col-id of the program in the *launch grid*
# pid_n = (pid % num_pid_in_group) // group_size_m
#
# For example, in the following matmul where each matrix is 9 blocks by 9 blocks,
# we can see that if we compute the output in row-major ordering, we need to load 90
# blocks into SRAM to compute the first 9 output blocks, but if we do it in grouped
# ordering, we only need to load 54 blocks.
# .. image:: grouped_vs_row_major_ordering.png
#
# In practice, this can improve the performance of our matrix multiplication kernel by
# more than 10\% on some hardware architecture (e.g., 220 to 245 TFLOPS on A100).
#
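# As a quick re-count of the 90 vs. 54 figure above in plain Python, for a 9x9 grid of blocks
# with an assumed GROUP_SIZE_M of 3:
def _blocks_loaded(first_nine_programs):
    loaded = set()
    for pid_m, pid_n in first_nine_programs:
        loaded |= {('A', pid_m, k) for k in range(9)}   # A blocks needed for this output block
        loaded |= {('B', k, pid_n) for k in range(9)}   # B blocks needed for this output block
    return len(loaded)
_row_major = [(m, n) for m in range(9) for n in range(9)][:9]   # first 9 output blocks, row-major order
_grouped = [(i % 3, i // 3) for i in range(9)]                  # first group: 3 rows, column-major order
assert _blocks_loaded(_row_major) == 90
assert _blocks_loaded(_grouped) == 54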
# %%
# Final Result
# -------------
#
import torch
import triton
import triton.language as tl
# %
# :code:`triton.jit`'ed functions can be auto-tuned by using the `triton.autotune`
# decorator, which consumes:
# - A list of :code:`triton.Config` objects that define different configurations of
# meta-parameters (e.g., BLOCK_SIZE_M) and compilation options (e.g., num_warps) to try
# - An autotuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
],
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. stride_am is how much to increase a_ptr
# by to get the element one row down (A has M rows)
stride_am, stride_ak,
stride_bk, stride_bn,
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
ACTIVATION: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse
# See above `L2 Cache Optimizations` section for details
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# a_ptrs is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
    # b_ptrs is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# see above `Pointer Arithmetics` section for details
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K, BLOCK_SIZE_K):
# Note that for simplicity, we don't apply a mask here.
# This means that if K is not a multiple of BLOCK_SIZE_K,
# this will access out-of-bounds memory and produce an
# error or (worse!) incorrect results.
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
# We accumulate along the K dimension
accumulator += tl.dot(a, b)
# Advance the ptrs to the next K block
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# you can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# we can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `_matmul`
@triton.jit
def leaky_relu(x):
x = x + 1
return tl.where(x >= 0, x, 0.01 * x)
# %%
# We can now create a convenience wrapper function that only takes two input tensors
# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel
def matmul(a, b, activation=""):
# checks constraints
assert a.shape[1] == b.shape[0], "incompatible dimensions"
assert a.is_contiguous(), "matrix A must be contiguous"
assert b.is_contiguous(), "matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
assert (
K % 32 == 0
), "We don't check memory-out-of-bounds with K so K must be divisible by BLOCK_SIZE_K"
# allocates output
c = torch.empty((M, N), device=a.device, dtype=a.dtype)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
)
matmul_kernel[grid](
a, b, c,
M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
ACTIVATION=activation,
)
return c
# %%
# Unit Test
# -----------
#
# We can test our custom matrix multiplication operation against a native torch implementation (i.e., cuBLAS)
torch.manual_seed(0)
a = torch.randn((512, 512), device='cuda', dtype=torch.float16)
b = torch.randn((512, 512), device='cuda', dtype=torch.float16)
triton_output = matmul(a, b)
torch_output = torch.matmul(a, b)
print(f"triton_output={triton_output}")
print(f"torch_output={torch_output}")
if triton.testing.allclose(triton_output, torch_output):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
# %%
# Benchmark
# --------------
#
# Square Matrix Performance
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
# We can now compare the performance of our kernel against that of cuBLAS. Here we focus on square matrices, but feel free to arrange this script as you wish to benchmark any other matrix shape.
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['M', 'N', 'K'], # argument names to use as an x-axis for the plot
x_vals=[
128 * i for i in range(2, 33)
], # different possible values for `x_name`
line_arg='provider', # argument name whose value corresponds to a different line in the plot
# possible values for `line_arg``
line_vals=['cublas', 'cublas + relu', 'triton', 'triton + relu'],
# label name for the lines
line_names=["cuBLAS", "cuBLAS (+ torch.nn.LeakyReLU)", "Triton", "Triton (+ LeakyReLU)"],
# line styles
styles=[('green', '-'), ('green', '--'), ('blue', '-'), ('blue', '--')],
ylabel="TFLOPS", # label name for the y-axis
plot_name="matmul-performance", # name for the plot. Used also as a file name for saving the plot.
args={},
)
)
def benchmark(M, N, K, provider):
a = torch.randn((M, K), device='cuda', dtype=torch.float16)
b = torch.randn((K, N), device='cuda', dtype=torch.float16)
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b))
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b))
if provider == 'cublas + relu':
torch_relu = torch.nn.ReLU(inplace=True)
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: torch_relu(torch.matmul(a, b))
)
if provider == 'triton + relu':
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: matmul(a, b, activation="leaky_relu")
)
perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
return perf(ms), perf(max_ms), perf(min_ms)
benchmark.run(show_plots=True, print_data=True)
| triton-master | python/tutorials/03-matrix-multiplication.py |
"""
Layer Normalization
====================
"""
import torch
import triton
import triton.language as tl
try:
# This is https://github.com/NVIDIA/apex, NOT the apex on PyPi, so it
# should not be added to extras_require in setup.py.
import apex
HAS_APEX = True
except ModuleNotFoundError:
HAS_APEX = False
@triton.jit
def _layer_norm_fwd_fused(
Out,
A,
Weight,
Bias,
Mean, Rstd,
stride, N, eps,
BLOCK_SIZE: tl.constexpr,
):
# position of elements processed by this program
row = tl.program_id(0)
Out += row * stride
A += row * stride
# compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0., eviction_policy="evict_last").to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(A + cols, mask=cols < N, other=0., eviction_policy="evict_last").to(tl.float32)
a = tl.where(cols < N, a - mean, 0.)
_var += a * a
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# write-back mean/rstd
tl.store(Mean + row, mean)
tl.store(Rstd + row, rstd)
# multiply by weight and add bias
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
weight = tl.load(Weight + cols, mask=mask)
bias = tl.load(Bias + cols, mask=mask)
a = tl.load(A + cols, mask=mask, other=0., eviction_policy="evict_first").to(tl.float32)
a_hat = (a - mean) * rstd
out = a_hat * weight + bias
# # write-back
tl.store(Out + cols, out, mask=mask)
# Backward pass (DA + partial DW + partial DB)
@triton.jit
def _layer_norm_bwd_dx_fused(
_DA,
_DOut,
_A,
Weight,
Mean, Rstd,
stride, NumRows, NumCols, eps,
BLOCK_SIZE_N: tl.constexpr,
):
# position of elements processed by this program
pid = tl.program_id(0)
row = pid
A = _A + row * stride
DOut = _DOut + row * stride
DA = _DA + row * stride
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
# load data to SRAM
_mean1 = tl.zeros([BLOCK_SIZE_N], dtype=tl.float32)
_mean2 = tl.zeros([BLOCK_SIZE_N], dtype=tl.float32)
for off in range(0, NumCols, BLOCK_SIZE_N):
cols = off + tl.arange(0, BLOCK_SIZE_N)
mask = cols < NumCols
a = tl.load(A + cols, mask=mask, other=0).to(tl.float32)
dout = tl.load(DOut + cols, mask=mask, other=0).to(tl.float32)
weight = tl.load(Weight + cols, mask=mask, other=0).to(tl.float32)
a_hat = (a - mean) * rstd
wdout = weight * dout
_mean1 += a_hat * wdout
_mean2 += wdout
mean1 = tl.sum(_mean1, axis=0) / NumCols
    mean2 = tl.sum(_mean2, axis=0) / NumCols
for off in range(0, NumCols, BLOCK_SIZE_N):
cols = off + tl.arange(0, BLOCK_SIZE_N)
mask = cols < NumCols
a = tl.load(A + cols, mask=mask, other=0).to(tl.float32)
dout = tl.load(DOut + cols, mask=mask, other=0).to(tl.float32)
weight = tl.load(Weight + cols, mask=mask, other=0).to(tl.float32)
a_hat = (a - mean) * rstd
wdout = weight * dout
da = (wdout - (a_hat * mean1 + mean2)) * rstd
# write-back dx
tl.store(DA + cols, da, mask=mask)
# Backward pass (total DW + total DB)
@triton.jit
def _layer_norm_bwd_dwdb(
A, DOut,
Mean, Var,
DW,
DB,
M, N,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
):
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
UNROLL: tl.constexpr = 4
for i in range(0, M, BLOCK_SIZE_M * UNROLL):
for j in range(UNROLL):
rows = i + j * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
a = tl.load(A + offs, mask=mask, other=0.).to(tl.float32)
dout = tl.load(DOut + offs, mask=mask, other=0.).to(tl.float32)
mean = tl.load(Mean + rows, mask=rows < M, other=0.)
rstd = tl.load(Var + rows, mask=rows < M, other=0.)
a_hat = (a - mean[:, None]) * rstd[:, None]
dw += dout * a_hat
db += dout
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(DW + cols, sum_dw, mask=cols < N)
tl.store(DB + cols, sum_db, mask=cols < N)
class LayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, a, normalized_shape, weight, bias, eps):
# allocate output
out = torch.empty_like(a)
# reshape input data into 2D tensor
a_arg = a.reshape(-1, a.shape[-1])
M, N = a_arg.shape
mean = torch.empty((M,), dtype=torch.float32, device="cuda")
rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // a.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
BLOCK_SIZE = max(BLOCK_SIZE, 128)
BLOCK_SIZE = min(BLOCK_SIZE, 4096)
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
_layer_norm_fwd_fused[(M,)](
out,
a_arg,
weight,
bias,
mean, rstd,
a_arg.stride(0), N, eps,
BLOCK_SIZE=BLOCK_SIZE,
num_warps=num_warps,
)
ctx.save_for_backward(
a, weight, bias, mean, rstd,
)
ctx.BLOCK_SIZE = BLOCK_SIZE
ctx.num_warps = num_warps
ctx.eps = eps
if hasattr(bias, "config"):
assert bias.config.grad_scale_name == weight.config.grad_scale_name
grad_scale_name = bias.config.grad_scale_name
else:
grad_scale_name = None
ctx.grad_scale_gain_bias_name = grad_scale_name
return out
@staticmethod
def backward(ctx, dout):
assert dout.is_contiguous()
a, weight, bias, mean, var = ctx.saved_tensors
# heuristics for amount of parallel reduction stream for DG/DB
N = weight.shape[0]
# allocate output
da = torch.empty_like(dout)
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
x_arg = a.reshape(-1, a.shape[-1])
M, N = x_arg.shape
dweight = torch.empty((weight.shape[0],), dtype=weight.dtype, device=weight.device)
dbias = torch.empty((weight.shape[0],), dtype=weight.dtype, device=weight.device)
_layer_norm_bwd_dx_fused[(M,)](
da,
dout,
a,
weight,
mean, var,
x_arg.stride(0), M, N,
ctx.eps,
BLOCK_SIZE_N=ctx.BLOCK_SIZE,
num_warps=ctx.num_warps,
)
if N > 10240:
BLOCK_SIZE_N = 128
BLOCK_SIZE_M = 32
num_warps = 4
else:
# maximize occupancy for small N
BLOCK_SIZE_N = 16
BLOCK_SIZE_M = 16
num_warps = 8
grid = lambda meta: [triton.cdiv(N, meta["BLOCK_SIZE_N"])]
_layer_norm_bwd_dwdb[grid](
a, dout,
mean, var,
dweight,
dbias,
M,
N,
BLOCK_SIZE_M=BLOCK_SIZE_M,
BLOCK_SIZE_N=BLOCK_SIZE_N,
num_warps=num_warps
)
return (da, None, dweight, dbias, None)
def layer_norm(a, normalized_shape, weight, bias, eps):
return LayerNorm.apply(a, normalized_shape, weight, bias, eps)
def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'):
torch.manual_seed(0)
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device='cuda')
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# forward pass
y_tri = layer_norm(x, w_shape, weight, bias, eps)
y_ref = torch.nn.functional.layer_norm(x, w_shape, weight, bias, eps).to(dtype)
# backward pass (triton)
y_tri.backward(dy, retain_graph=True)
dx_tri, dw_tri, db_tri = [_.grad.clone() for _ in [x, weight, bias]]
x.grad, weight.grad, bias.grad = None, None, None
# backward pass (torch)
y_ref.backward(dy, retain_graph=True)
dx_ref, dw_ref, db_ref = [_.grad.clone() for _ in [x, weight, bias]]
# compare
triton.testing.assert_almost_equal(y_tri, y_ref)
triton.testing.assert_almost_equal(dx_tri, dx_ref)
triton.testing.assert_almost_equal(db_tri, db_ref, decimal=1)
triton.testing.assert_almost_equal(dw_tri, dw_ref, decimal=1)
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'],
x_vals=[512 * i for i in range(2, 32)],
line_arg='provider',
line_vals=['triton', 'torch'] + (['apex'] if HAS_APEX else []),
line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []),
styles=[('blue', '-'), ('green', '-'), ('orange', '-')],
ylabel='GB/s',
plot_name='layer-norm',
args={'M': 4096, 'dtype': torch.float16, 'mode': 'forward'}
)
)
def bench_layer_norm(M, N, dtype, provider, mode, eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device='cuda')
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# utility functions
if provider == 'triton':
y_fwd = lambda: layer_norm(x, w_shape, weight, bias, eps)
if provider == 'torch':
y_fwd = lambda: torch.nn.functional.layer_norm(x, w_shape, weight, bias, eps)
if provider == 'apex':
apex_layer_norm = apex.normalization.FusedLayerNorm(w_shape).to(x.device).to(x.dtype)
y_fwd = lambda: apex_layer_norm(x)
# forward pass
if mode == 'forward':
gbps = lambda ms: 2 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(y_fwd, rep=500)
# backward pass
if mode == 'backward':
gbps = lambda ms: 3 * x.numel() * x.element_size() / ms * 1e-6
y = y_fwd()
ms, min_ms, max_ms = triton.testing.do_bench(lambda: y.backward(dy, retain_graph=True),
grad_to_none=[x], rep=500)
return gbps(ms), gbps(max_ms), gbps(min_ms)
# test_layer_norm(1151, 8192, torch.float16)
bench_layer_norm.run(save_path='.', print_data=True)
| triton-master | python/tutorials/05-layer-norm.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Triton documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 10 01:19:09 2020.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
def process_sig(app, what, name, obj, options, signature, return_annotation):
if signature and '_builder' in signature:
signature = signature.split('_builder')[0] + ")"
return (signature, return_annotation)
def setup(app):
"""Customize function args retrieving to get args under decorator."""
import sphinx
import os
app.connect("autodoc-process-signature", process_sig)
os.system("pip install -e ../python")
def forward_jit_fn(func):
old = func
def wrapped(obj, **kwargs):
import triton
if isinstance(obj, triton.code_gen.JITFunction):
obj = obj.fn
return old(obj)
return wrapped
old_documenter = sphinx.ext.autosummary.get_documenter
def documenter(app, obj, parent):
import triton
if isinstance(obj, triton.code_gen.JITFunction):
obj = obj.fn
return old_documenter(app, obj, parent)
sphinx.ext.autosummary.get_documenter = documenter
sphinx.util.inspect.unwrap_all = forward_jit_fn(sphinx.util.inspect.unwrap_all)
sphinx.util.inspect.signature = forward_jit_fn(sphinx.util.inspect.signature)
sphinx.util.inspect.object_description = forward_jit_fn(sphinx.util.inspect.object_description)
# Auto Doc
import sys
import os
sys.path.insert(0, os.path.abspath('../python/'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.autosummary', 'sphinx.ext.coverage', 'sphinx.ext.napoleon', 'sphinx_multiversion']
autosummary_generate = True
# versioning config
smv_tag_whitelist = r'^(v1.1.2)$'
smv_branch_whitelist = r'^master$'
smv_remote_whitelist = None
smv_released_pattern = r'^tags/.*$'
smv_outputdir_format = '{ref.name}'
smv_prefer_remote_refs = False
# Sphinx gallery
extensions += ['sphinx_gallery.gen_gallery']
from sphinx_gallery.sorting import FileNameSortKey
sphinx_gallery_conf = {
'examples_dirs': '../python/tutorials/',
'gallery_dirs': 'getting-started/tutorials',
'filename_pattern': '',
'ignore_pattern': r'__init__\.py',
'within_subsection_order': FileNameSortKey,
'reference_url': {
'sphinx_gallery': None,
}
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
html_sidebars = {
'**': [
'_templates/versions.html',
],
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Triton'
copyright = '2020, Philippe Tillet'
author = 'Philippe Tillet'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/custom.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tritondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Triton.tex', 'Triton Documentation', 'Philippe Tillet', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'triton', 'Triton Documentation', [author], 1)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Triton', 'Triton Documentation', author, 'Triton', 'One line description of project.', 'Miscellaneous'),
] | triton-master | docs/conf.py |
from setuptools import setup, find_packages
setup(
name = 'soundstorm-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.4',
license='MIT',
description = 'SoundStorm - Efficient Parallel Audio Generation from Google Deepmind, in Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/soundstorm-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'audio generation'
],
install_requires=[
'accelerate',
'audiolm-pytorch>=1.2.8',
'beartype',
'classifier-free-guidance-pytorch>=0.1.5',
'einops>=0.6.1',
'spear-tts-pytorch>=0.0.15',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| soundstorm-pytorch-main | setup.py |
import math
from random import random, randrange
from functools import wraps
from contextlib import nullcontext
from collections import namedtuple
from pathlib import Path
import torch
from torch import Tensor, nn, einsum
import torch.nn.functional as F
from einops import rearrange, reduce, repeat, unpack, pack
from einops.layers.torch import Rearrange, EinMix
from beartype import beartype
from beartype.door import is_bearable
from beartype.typing import Union, Dict, Optional, List
from soundstorm_pytorch.attend import Attend
from spear_tts_pytorch import TextToSemantic
from audiolm_pytorch import SoundStream
from audiolm_pytorch import HubertWithKmeans, FairseqVQWav2Vec
from tqdm import tqdm
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
def calc_same_padding(kernel_size):
pad = kernel_size // 2
return (pad, pad - (kernel_size + 1) % 2)
def eval_decorator(fn):
@wraps(fn)
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = logits.topk(k, dim = -1)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(2, ind, val)
return probs
def log(t, eps = 1e-10):
return torch.log(t + eps)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
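# note on gumbel_sample: this is the Gumbel-max trick -- adding Gumbel noise to the temperature-scaled
# logits and taking the argmax draws a categorical sample; temperature -> 0 approaches greedy argmax,
# while larger temperatures flatten the distribution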
# prob helpers
def sample_prob(prob):
return random() < prob
def coin_flip():
return sample_prob(0.5)
# tensor helpers
@beartype
def get_mask_subset_prob(
mask: Tensor,
prob: Union[float, Tensor],
min_mask: int = 0
):
batch, seq, device = *mask.shape, mask.device
if isinstance(prob, Tensor):
prob = rearrange(prob, 'b -> b 1')
num_to_mask = (mask.sum(dim = -1, keepdim = True) * prob).clamp(min = min_mask)
logits = torch.rand((batch, seq), device = device)
logits = logits.masked_fill(~mask, -1)
randperm = logits.argsort(dim = -1).float()
num_padding = (~mask).sum(dim = -1, keepdim = True)
randperm -= num_padding
subset_mask = randperm < num_to_mask
subset_mask.masked_fill_(~mask, False)
return subset_mask
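# example usage (illustrative; values assumed) -- intended to pick a random subset of the valid
# (non-padding) positions:
#   mask   = torch.tensor([[True, True, True, True, False]])   # last position is padding
#   subset = get_mask_subset_prob(mask, 0.5)                    # roughly half of the 4 valid positions become True
# padding positions are never selected, and `min_mask` enforces a floor on the number selected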
# schedules
def linear_schedule(t):
return 1 - t
def cosine_schedule(t):
""" https://arxiv.org/abs/2202.04200 """
return torch.cos(t * math.pi / 2)
# rotary embedding
class RotaryEmbedding(nn.Module):
def __init__(self, dim, theta = 10000):
super().__init__()
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq, persistent = False)
@property
def device(self):
return next(self.buffers()).device
def forward(self, seq_len):
t = torch.arange(seq_len, device = self.device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
return freqs
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
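# example usage (illustrative; shapes assumed): with q of shape (b, h, n, d) and
#   freqs = RotaryEmbedding(dim_head)(n)
#   q = apply_rotary_pos_emb(freqs, q)
# each channel pair of q is rotated by a position-dependent angle, encoding relative positions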
# t5 relative positional bias
class T5RelativePositionBias(nn.Module):
def __init__(
self,
scale = 1.,
num_buckets = 32,
max_distance = 128,
heads = 8
):
super().__init__()
self.scale = scale
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(
relative_position,
num_buckets = 32,
max_distance = 128
):
ret = 0
n = -relative_position
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(
val_if_large,
torch.full_like(val_if_large, num_buckets - 1)
)
ret += torch.where(is_small, n, val_if_large)
return ret
@property
def device(self):
return next(self.parameters()).device
def forward(self, n):
pos = torch.arange(n, device = self.device).long()
rel_pos = rearrange(pos, 'j -> 1 j') - rearrange(pos, 'i -> i 1')
rp_bucket = self._relative_position_bucket(rel_pos, num_buckets = self.num_buckets, max_distance = self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> h i j')
return bias * self.scale
# conformer
class Swish(nn.Module):
def forward(self, x):
return x * x.sigmoid()
class GLU(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
out, gate = x.chunk(2, dim=self.dim)
return out * gate.sigmoid()
class DepthWiseConv1d(nn.Module):
def __init__(self, chan_in, chan_out, kernel_size, padding):
super().__init__()
self.padding = padding
self.conv = nn.Conv1d(chan_in, chan_out, kernel_size, groups = chan_in)
def forward(self, x):
x = F.pad(x, self.padding)
return self.conv(x)
# attention, feedforward, and conv module
class Scale(nn.Module):
def __init__(self, scale, fn):
super().__init__()
self.fn = fn
self.scale = scale
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.scale
class ChanLayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(1, dim, 1))
def forward(self, x):
eps = 1e-6 if x.dtype == torch.float32 else 1e-4
var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = 1, keepdim = True)
return (x - mean) * var.clamp(min = eps).rsqrt() * self.gamma
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class Attention(nn.Module):
def __init__(
self,
dim,
heads = 8,
dim_head = 64,
dropout = 0.,
flash = True
):
super().__init__()
inner_dim = dim_head * heads
self.heads= heads
self.scale = dim_head ** -0.5
self.attend = Attend(
flash = flash,
dropout = dropout
)
self.dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(
self,
x,
context = None,
mask = None,
rotary_emb = None,
attn_bias = None
):
n, device, h, has_context = x.shape[-2], x.device, self.heads, exists(context)
context = default(context, x)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
if exists(rotary_emb):
q = apply_rotary_pos_emb(rotary_emb, q)
k = apply_rotary_pos_emb(rotary_emb, k)
out = self.attend(q, k, v, mask = mask, attn_bias = attn_bias)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class FeedForward(nn.Module):
def __init__(
self,
dim,
mult = 4,
dropout = 0.
):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
Swish(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class ConformerConvModule(nn.Module):
def __init__(
self,
dim,
causal = False,
expansion_factor = 2,
kernel_size = 31,
dropout = 0.
):
super().__init__()
inner_dim = dim * expansion_factor
padding = calc_same_padding(kernel_size) if not causal else (kernel_size - 1, 0)
self.net = nn.Sequential(
nn.LayerNorm(dim),
Rearrange('b n c -> b c n'),
nn.Conv1d(dim, inner_dim * 2, 1),
GLU(dim=1),
DepthWiseConv1d(inner_dim, inner_dim, kernel_size = kernel_size, padding = padding),
Swish(),
ChanLayerNorm(inner_dim),
nn.Conv1d(inner_dim, dim, 1),
Rearrange('b c n -> b n c'),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
# Conformer Block
class ConformerBlock(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
ff_mult = 4,
conv_expansion_factor = 2,
conv_kernel_size = 31,
attn_dropout = 0.,
attn_flash = True,
ff_dropout = 0.,
conv_dropout = 0.,
conv_causal = False
):
super().__init__()
self.ff1 = FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
self.attn = Attention(dim = dim, dim_head = dim_head, heads = heads, dropout = attn_dropout, flash = attn_flash)
self.conv = ConformerConvModule(dim = dim, causal = conv_causal, expansion_factor = conv_expansion_factor, kernel_size = conv_kernel_size, dropout = conv_dropout)
self.ff2 = FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout)
self.attn = PreNorm(dim, self.attn)
self.ff1 = Scale(0.5, PreNorm(dim, self.ff1))
self.ff2 = Scale(0.5, PreNorm(dim, self.ff2))
self.post_norm = nn.LayerNorm(dim)
def forward(
self,
x,
mask = None,
rotary_emb = None,
attn_bias = None
):
x = self.ff1(x) + x
x = self.attn(x, mask = mask, rotary_emb = rotary_emb, attn_bias = attn_bias) + x
x = self.conv(x) + x
x = self.ff2(x) + x
x = self.post_norm(x)
return x
# Conformer
class Conformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
dim_head = 64,
heads = 8,
ff_mult = 4,
conv_expansion_factor = 2,
conv_kernel_size = 31,
attn_dropout = 0.,
ff_dropout = 0.,
conv_dropout = 0.,
conv_causal = False,
attn_flash = True,
t5_rel_pos_bias = False
):
super().__init__()
assert not (t5_rel_pos_bias and attn_flash), 'flash attention is not compatible with learned bias'
self.dim = dim
self.layers = nn.ModuleList([])
self.rotary_emb = RotaryEmbedding(dim_head) if not t5_rel_pos_bias else None
self.rel_pos_bias = T5RelativePositionBias(dim_head ** 0.5, heads = heads) if t5_rel_pos_bias else None
for _ in range(depth):
self.layers.append(ConformerBlock(
dim = dim,
dim_head = dim_head,
heads = heads,
ff_mult = ff_mult,
conv_expansion_factor = conv_expansion_factor,
conv_kernel_size = conv_kernel_size,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
conv_dropout = conv_dropout,
conv_causal = conv_causal,
attn_flash = attn_flash
))
def forward(self, x, mask = None):
seq_len = x.shape[-2]
rotary_emb = self.rotary_emb(seq_len) if exists(self.rotary_emb) else None
attn_bias = self.rel_pos_bias(seq_len) if exists(self.rel_pos_bias) else None
for block in self.layers:
x = block(
x,
mask = mask,
rotary_emb = rotary_emb,
attn_bias = attn_bias
)
return x
# conformer with sum reduction across quantized tokens at the beginning, along with heads
class ConformerWrapper(nn.Module):
@beartype
def __init__(
self,
*,
codebook_size,
num_quantizers,
conformer: Union[Conformer, Dict[str, any]],
grouped_quantizers = 1
):
super().__init__()
self.conformer = conformer
if isinstance(conformer, dict):
self.conformer = Conformer(**self.conformer)
dim = self.conformer.dim
self.embedding_proj = nn.Sequential(
nn.Linear(dim * grouped_quantizers, dim),
nn.LayerNorm(dim)
) if grouped_quantizers > 1 else nn.Identity()
num_codes_with_mask = codebook_size + 1
num_effective_quantizers = num_quantizers * grouped_quantizers
self.code_embeds = nn.Embedding(num_codes_with_mask * num_effective_quantizers, dim)
self.register_buffer('quantizer_offsets', torch.arange(num_effective_quantizers) * num_codes_with_mask, persistent = False)
self.register_buffer('mask_tokens', self.quantizer_offsets + num_codes_with_mask, persistent = False)
self.dim = dim
self.codebook_size = codebook_size
self.num_codes_with_mask = num_codes_with_mask
self.num_quantizers = num_quantizers
self.grouped_quantizers = grouped_quantizers
self.heads = nn.Sequential(
nn.Linear(dim, dim * num_effective_quantizers),
Rearrange('b n (h d) -> b (n h) d', h = num_effective_quantizers)
)
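        # every position is projected to one embedding per (group x quantizer) and the results are
        # interleaved back along the sequence, matching the flattened (n * q) token layout used below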
# each quantizer codebook would require its own logits weight and bias matrices
# the amazing einops makes this easy with 'EinMix'
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
Rearrange('b (n gq) d -> b n gq d', gq = num_effective_quantizers),
EinMix(
'b n gq d -> b n gq l',
weight_shape = 'gq d l',
bias_shape = 'gq l',
gq = num_effective_quantizers,
l = codebook_size,
d = dim
),
Rearrange('b ... d -> b (...) d')
)
def forward(
self,
x,
*,
mask = None,
cond = None,
sum_embeds = None,
return_embeddings = False,
return_logits_and_embeddings = False
):
"""
einops notation:
b - batch
n - sequence
g - groups
q - quantizers
d - feature dimension
"""
n, q, g = x.shape[-1], self.num_quantizers, self.grouped_quantizers
assert divisible_by(n, g * q), 'sequence must be divisible by number of quantizers'
x = rearrange(x, 'b (n gq) -> b n gq', gq = g * q)
x = x + self.quantizer_offsets
x = self.code_embeds(x)
x = reduce(x, 'b n (g q) d -> b n (g d)', 'sum', g = g)
x = self.embedding_proj(x)
if exists(sum_embeds):
x = x + sum_embeds
if exists(cond):
if cond.ndim == 2:
cond = rearrange(cond, 'b d -> b 1 d')
x = x + cond
x = self.conformer(x, mask = mask)
embeds = self.heads(x)
if return_embeddings or not exists(self.to_logits):
return embeds
logits = self.to_logits(embeds)
if return_logits_and_embeddings:
return logits, embeds
return logits
# for main logits as well as self token critic
class LogitHead(nn.Module):
def __init__(
self,
net: ConformerWrapper,
logit_dim
):
super().__init__()
self.net = net
dim = net.dim
self.to_logits = nn.Linear(dim, logit_dim)
def forward(self, x):
embed = self.net(x, return_embeddings = True)
return self.to_logits(embed)
# main soundstorm class, which is just a maskgit
LossBreakdown = namedtuple('LossBreakdown', ['generator_loss', 'critic_loss'])
class SoundStorm(nn.Module):
@beartype
def __init__(
self,
net: ConformerWrapper,
*,
soundstream: Optional[SoundStream] = None,
spear_tts_text_to_semantic: Optional[TextToSemantic] = None,
wav2vec: Optional[Union[HubertWithKmeans, FairseqVQWav2Vec]] = None,
steps = 18,
self_cond = False,
self_cond_train_prob = 0.75,
no_replace_prob = 0.15, # which percentage of the tokens masked will stay the same, done in original MLM paper
random_token_prob = 0.1, # which percentage of tokens to be replaced with random token, done in original MLM paper
schedule = 'linear',
can_mask_prev_unmasked = False, # when unmasking, whether it can remask previously unmasked
self_token_critic = False, # https://aclanthology.org/2021.naacl-main.409/
critic_loss_weight = 1.,
num_semantic_token_ids = None,
semantic_pad_id = -1,
pad_id = None,
wav2vec_target_sample_hz = None,
wav2vec_downsample_factor = None,
codec_target_sample_hz = None,
codec_downsample_factor = None,
):
super().__init__()
# conformer settings
self.net = net
dim = net.dim
self.dim = dim
self.num_tokens = net.codebook_size
self.pad_id = pad_id
# set soundstream
self.soundstream = soundstream
if exists(soundstream):
self.codec_target_sample_hz = soundstream.target_sample_hz
self.codec_downsample_factor = soundstream.downsample_factor
else:
self.codec_target_sample_hz = codec_target_sample_hz
self.codec_downsample_factor = codec_downsample_factor
if exists(self.soundstream):
assert net.grouped_quantizers == soundstream.rq_groups
assert net.codebook_size == soundstream.codebook_size
assert net.num_quantizers == soundstream.num_quantizers
# set text-to-semantic
self.text_to_semantic = spear_tts_text_to_semantic
if exists(spear_tts_text_to_semantic) and exists(spear_tts_text_to_semantic.wav2vec):
assert not exists(wav2vec), 'wav2vec model already supplied from the TextToSemantic instance from SpearTTS'
            assert not (exists(wav2vec_downsample_factor) or exists(wav2vec_target_sample_hz)), 'wav2vec downsample factor and sampling freq are auto-set from the text-to-semantic module passed in, as it contains the wav2vec instance'
self.wav2vec = spear_tts_text_to_semantic.wav2vec
            self.wav2vec_target_sample_hz = self.wav2vec.target_sample_hz
            self.wav2vec_downsample_factor = self.wav2vec.downsample_factor
elif exists(wav2vec):
            assert not (exists(wav2vec_downsample_factor) or exists(wav2vec_target_sample_hz)), 'wav2vec downsample factor and sampling freq are auto-set from the wav2vec module passed in'
self.wav2vec = wav2vec
self.wav2vec_target_sample_hz = wav2vec.target_sample_hz
self.wav2vec_downsample_factor = wav2vec.downsample_factor
else:
self.wav2vec = None
self.wav2vec_target_sample_hz = wav2vec_target_sample_hz
self.wav2vec_downsample_factor = wav2vec_downsample_factor
# whether to text condition on audio generation is dependent on whether hyperparameters are supplied
self.should_condition = exists(self.wav2vec_downsample_factor) and exists(self.wav2vec_target_sample_hz)
# in the case that text-to-semantic module passed in
if self.should_condition:
assert exists(self.codec_target_sample_hz) and exists(self.codec_downsample_factor)
if exists(spear_tts_text_to_semantic):
self.semantic_token_emb = spear_tts_text_to_semantic.semantic_token_emb
self.num_semantic_token_ids = spear_tts_text_to_semantic.num_semantic_token_ids
self.semantic_cond_to_model_dim = nn.Linear(spear_tts_text_to_semantic.dim, net.dim)
self.semantic_pad_id = spear_tts_text_to_semantic.pad_id.get('speech')
else:
assert exists(num_semantic_token_ids), 'if you are conditioning, you must pass in the number of semantic token ids'
self.semantic_token_emb = nn.Embedding(num_semantic_token_ids, dim)
self.num_semantic_token_ids = num_semantic_token_ids
self.semantic_cond_to_model_dim = nn.Identity()
self.semantic_pad_id = semantic_pad_id
# detect token critic settings
self.num_quantizers = net.num_quantizers
self.grouped_quantizers = net.grouped_quantizers
self.mask_id = net.codebook_size
# afaict, maskgit paper did not do this
# but may help for self conditioning, as used successfully in original BERT
self.no_replace_prob = no_replace_prob
self.random_token_prob = random_token_prob
self.steps = steps
        if callable(schedule):
            self.schedule_fn = schedule
        elif schedule == 'linear':
            self.schedule_fn = linear_schedule
        elif schedule == 'cosine':
            self.schedule_fn = cosine_schedule
        else:
            raise ValueError(f'invalid schedule {schedule}')
self.can_mask_prev_unmasked = can_mask_prev_unmasked
# self conditioning
self.self_cond = self_cond
if self_cond:
self.null_embed = nn.Parameter(torch.randn(dim))
self.to_self_cond = nn.Linear(dim, dim, bias = False) if self_cond else None
self.self_cond_train_prob = self_cond_train_prob
# token critic
self.token_critic = None
if self_token_critic:
self.token_critic = LogitHead(net, 1)
self.critic_loss_weight = critic_loss_weight
@property
def device(self):
return next(self.net.parameters()).device
def load(self, path, strict = True):
# Return pkg so that if this function gets called from within a Trainer function call,
# the trainer can also access the package loaded from the checkpoint.
path = Path(path)
assert path.exists()
pkg = torch.load(str(path), map_location = 'cpu')
self.load_state_dict(pkg['model'], strict = strict)
return pkg
@torch.no_grad()
@eval_decorator
def generate(
self,
num_latents = None,
*,
mask = None,
texts: Optional[Union[List[str], Tensor]] = None,
cond_semantic_token_ids = None,
prompt_acoustic_token_ids = None,
seconds = None,
batch_size = None,
start_temperature = 1.,
filter_thres = 0.7,
noise_level_scale = 1.,
num_full_sampling_levels = 1,
text_to_semantic_generate_kwargs: dict = {},
**kwargs
):
if self.should_condition and not exists(cond_semantic_token_ids):
assert exists(texts) and exists(self.text_to_semantic)
if is_bearable(texts, List[str]):
assert exists(self.text_to_semantic.tokenizer_encode)
texts = self.text_to_semantic.tokenizer_encode(texts)
texts = texts.to(self.device)
cond_semantic_token_ids = self.text_to_semantic.generate(
texts,
source_type = 'text',
target_type = 'speech',
**text_to_semantic_generate_kwargs
)
assert not (exists(cond_semantic_token_ids) ^ self.should_condition), 'you either have text-conditioning turned on and have not passed in any conditioning semantic token ids, or vice versa'
# maybe condition
cond_tokens = self.maybe_get_condition(cond_semantic_token_ids)
# determine batch size and sequence length, which depends whether it is conditioning
if exists(cond_tokens):
batch_size, num_latents = cond_tokens.shape[:2]
sample_one = batch_size == 1
else:
sample_one = not exists(batch_size)
batch_size = default(batch_size, 1)
assert exists(num_latents) ^ exists(seconds)
if not exists(num_latents):
assert exists(self.soundstream), 'soundstream must be passed in to generate in seconds'
num_latents = (seconds * self.soundstream.target_sample_hz) // self.soundstream.seq_len_multiple_of
# determine sequence length
num_effective_quantizers = self.grouped_quantizers * self.num_quantizers
seq_len = num_latents * num_effective_quantizers
# device and time
device = self.device
times = torch.linspace(0., 1., self.steps + 1, device = device)
# sequence starts off as all masked
# todo: find a better name for sequence mask vs mask for mask diffusion
shape = (batch_size, seq_len)
seq = torch.full(shape, self.mask_id, device = device)
seq_mask = mask
if not exists(seq_mask):
seq_mask = torch.ones((batch_size, num_latents), device = device, dtype = torch.bool)
seq_mask_with_quantizer = repeat(seq_mask, 'b n -> b (n q)', q = num_effective_quantizers)
mask = torch.full(shape, True, device = device)
# include prompt tokens unmasked as the sequence prefix, starting from the lowest quantizer
prompt_mask = None
if exists(prompt_acoustic_token_ids):
prompt_len, num_prompt_quantizers = prompt_acoustic_token_ids.shape[1:]
assert num_prompt_quantizers <= num_effective_quantizers, 'number of prompt quantizers cannot be greater than the number of quantizers'
seq = rearrange(seq, 'b (n q) -> b n q', q = num_effective_quantizers)
prompt_mask = rearrange(mask, 'b (n q) -> b n q', q = num_effective_quantizers)
seq[:, :prompt_len, :num_prompt_quantizers] = prompt_acoustic_token_ids
prompt_mask[:, :prompt_len, :num_prompt_quantizers] = False
seq = rearrange(seq, 'b n q -> b (n q)', q = num_effective_quantizers)
prompt_mask = rearrange(prompt_mask, 'b n q -> b (n q)', q = num_effective_quantizers)
# slowly demask
seq_len_from_mask = reduce(seq_mask, 'b n -> b', 'sum')
rand_mask_probs = self.schedule_fn(times[1:])
rand_mask_probs = rearrange(rand_mask_probs, 'n -> n 1')
all_mask_num_tokens = (rand_mask_probs * seq_len_from_mask).long()
# self conditioning
has_self_cond = self.self_cond
last_embed = self.null_embed if has_self_cond else None
for q_level in range(num_effective_quantizers):
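            # the first `num_full_sampling_levels` quantizer levels are unmasked gradually over
            # `steps` iterations; the remaining levels are filled in a single greedy pass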
mask_num_tokens_for_q_level = all_mask_num_tokens if q_level < num_full_sampling_levels else torch.zeros((1, batch_size), dtype = torch.long, device = device)
for mask_num_tokens, steps_until_x0 in tqdm(zip(mask_num_tokens_for_q_level, reversed(range(self.steps))), total = self.steps):
self_cond = self.to_self_cond(last_embed) if has_self_cond else None
logits, embeds = self.net(
seq,
mask = seq_mask,
cond = cond_tokens,
sum_embeds = self_cond,
return_logits_and_embeddings = True,
**kwargs
)
if has_self_cond:
last_embed = embeds
if exists(filter_thres):
logits = top_k(logits, filter_thres)
annealing_scale = steps_until_x0 / self.steps
temperature = start_temperature * annealing_scale
sampled_ids = gumbel_sample(logits, temperature = max(temperature, 1e-3))
# don't sample for lower quantizer levels
if q_level > 0:
sample_mask = rearrange(mask, 'b (n q) -> b n q', q = num_effective_quantizers)
sample_mask[:, :, :q_level] = False
sample_mask = rearrange(sample_mask, 'b n q -> b (n q)', q = num_effective_quantizers)
else:
sample_mask = mask
seq = torch.where(sample_mask, sampled_ids, seq)
if (mask_num_tokens == 0).all():
continue
if exists(self.token_critic):
scores = self.token_critic(seq)
scores = rearrange(scores, 'b n 1 -> b n')
scores = scores + noise_level_scale * gumbel_noise(scores) * annealing_scale
else:
scores = 1 - logits.softmax(dim = -1)
scores = scores.gather(2, rearrange(sampled_ids, 'b n -> b n 1'))
scores = rearrange(scores, 'b n 1 -> b n')
mask = torch.zeros_like(scores, dtype = torch.bool)
# mask based on highest score
mask_value = -torch.finfo(scores.dtype).max
scores = scores.masked_fill(~seq_mask_with_quantizer, mask_value)
if not self.can_mask_prev_unmasked:
scores = scores.masked_fill(~mask, mask_value)
scores_sorted = scores.argsort(dim = -1, descending = True)
mask_num_tokens = rearrange(mask_num_tokens, 'b -> b 1')
mask = scores_sorted < mask_num_tokens
mask = rearrange(mask, 'b (n q) -> b n q', q = num_effective_quantizers)
# mask all upper quantizer levels
if q_level < (num_effective_quantizers - 1):
mask[:, :, q_level + 1:] = True
# unmask all lower quantizer levels
if q_level > 0:
mask[:, :, :q_level] = False
mask = rearrange(mask, 'b n q -> b (n q)', q = num_effective_quantizers)
if exists(prompt_mask):
mask = mask & prompt_mask
seq = seq.masked_fill(mask, self.mask_id)
out = seq
if exists(self.soundstream):
seq = rearrange(seq, 'b (n q) -> b n q', q = self.num_quantizers)
with torch.no_grad():
self.soundstream.eval()
out = self.soundstream.decode_from_codebook_indices(seq)
out = rearrange(out, 'b 1 ... -> b ...')
if sample_one:
out = rearrange(out, '1 ... -> ...')
return out
def maybe_get_condition(self, token_ids = None, length = None):
assert not (exists(token_ids) ^ self.should_condition), 'you either have text-conditioning turned on and have not passed in any conditioning semantic token ids, or vice versa'
if not exists(token_ids):
return None
context = torch.no_grad if exists(self.text_to_semantic) else nullcontext
with context():
mask = token_ids != self.semantic_pad_id
# also remove the eos semantic token id
if exists(self.text_to_semantic) and self.text_to_semantic.autoset_eos_id['speech']:
mask &= token_ids != self.num_semantic_token_ids
token_ids = token_ids.masked_fill(~mask, 0)
semantic_tokens = self.semantic_token_emb(token_ids)
cond_tokens = self.semantic_cond_to_model_dim(semantic_tokens)
# just mask out the padding to 0s and let the network learn that for now
        # eventually should add self attention masking to conformer, and calculate the correct number of masked tokens per variable-length batch row
cond_tokens = cond_tokens.masked_fill(~rearrange(mask, '... -> ... 1'), 0.)
# now need to interpolate the conditioning tokens
# to align semantic and vector quantized tokens, time-wise
cond_length = cond_tokens.shape[-2]
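        # each semantic token spans (wav2vec_downsample_factor / wav2vec_target_sample_hz) seconds and
        # each acoustic frame spans (codec_downsample_factor / codec_target_sample_hz) seconds, so the
        # ratio below rescales the conditioning length to the acoustic token rate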
target_cond_length = math.ceil(cond_length * (self.wav2vec_downsample_factor / self.wav2vec_target_sample_hz) / (self.codec_downsample_factor / self.codec_target_sample_hz))
# pytorch does not interpolate 1d, so hack by convert to 2d
if cond_length != target_cond_length:
cond_tokens = rearrange(cond_tokens, 'b n d -> b d n 1')
cond_tokens = F.interpolate(cond_tokens, (target_cond_length, 1), mode = 'bilinear')
cond_tokens = rearrange(cond_tokens, 'b d n 1 -> b n d')
# whether to curtail or pad to length
cond_length = cond_tokens.shape[-2]
if exists(length):
if cond_length < length:
cond_tokens = F.pad(cond_tokens, (0, 0, 0, length - cond_length), value = 0.)
elif cond_length > length:
cond_tokens = cond_tokens[:, :length]
return cond_tokens
def forward(
self,
x,
*,
mask = None,
cond_semantic_token_ids = None,
only_train_generator = False,
only_train_critic = False,
generator_sample_temperature = None,
**kwargs
):
# if raw audio passed in, convert to residual quantized vectors
is_raw_audio = x.dtype == torch.float
# if semantic token ids not supplied and conditioning is indicated
# see if wav2vec and raw audio is available
if self.should_condition and not exists(cond_semantic_token_ids) and is_raw_audio:
with torch.no_grad():
self.wav2vec.eval()
cond_semantic_token_ids = self.wav2vec(x, flatten = False)
# derive residual vector quantized ids if raw audio passed in
if is_raw_audio:
assert exists(self.soundstream)
with torch.no_grad():
self.soundstream.eval()
_, x, _ = self.soundstream(x, return_encoded = True)
# shape
b, n, gq, device = *x.shape, x.device
assert gq == (self.num_quantizers * self.grouped_quantizers), f'codes passed in has {gq} quantizers (x groups) but the conformer wrapper was set to num_quantizers {self.num_quantizers} and grouped_quantizers {self.grouped_quantizers}'
# mask was used below, rename input mask as seq_mask
# todo: rename mask used for mask diffusion later
seq_mask = mask
if not exists(seq_mask):
seq_mask = torch.ones((b, n), device = device, dtype = torch.bool)
if exists(self.pad_id):
pad_mask = (x == self.pad_id).any(dim = -1)
seq_mask = seq_mask & ~pad_mask
if self.pad_id < 0:
# if using say -1 for padding
x = torch.where(rearrange(pad_mask, 'b n -> b n 1'), 0, x)
# maybe condition
cond_tokens = self.maybe_get_condition(cond_semantic_token_ids, length = x.shape[-2])
# prepare masking, selecting the prompt from a random prefix
orig_seq = rearrange(x.clone(), 'b n q -> b (n q)')
min_seq_len = seq_mask.sum(dim = -1).amin()
t = randrange(0, min_seq_len - 1)
mask = seq_mask[:, t:]
rand_times = torch.empty(b, device = device).uniform_(0, 1)
rand_probs = self.schedule_fn(rand_times)
mask = get_mask_subset_prob(mask, rand_probs)
# random quantizer position, in groups
q = randrange(0, self.num_quantizers) * self.grouped_quantizers
# to ensure all tokens produce embeddings, instead of just the ones with [mask] input, as done in seminal BERT MLM paper
# potentially needed for self-conditioning (on embedding) to work well
replace_mask_id_mask = mask.clone()
frac_seq_left = 1.
if self.no_replace_prob > 0. and coin_flip():
frac_seq_left -= self.no_replace_prob
no_replace_prob_mask = get_mask_subset_prob(mask, self.no_replace_prob)
replace_mask_id_mask &= ~no_replace_prob_mask
if self.random_token_prob > 0. and coin_flip():
random_token_prob_mask = get_mask_subset_prob(replace_mask_id_mask, self.random_token_prob * frac_seq_left)
random_tokens = torch.randint(0, self.num_tokens, (b, n - t), device = device)
x[:, t:, q] = torch.where(random_token_prob_mask, random_tokens, x[:, t:, q])
replace_mask_id_mask &= ~random_token_prob_mask
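        # build the corrupted input: in the masked suffix, quantizer level q is replaced with the [mask]
        # id (except for the no-replace / random-token subsets above), levels below q are kept as ground
        # truth context, and all levels above q are masked out entirely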
masked = torch.where(replace_mask_id_mask, self.mask_id, x[:, t:, q])
masked = rearrange(torch.cat((x[:, :t, q], masked), dim=1), 'b n -> b n 1')
masked = torch.cat((x[:, :, :q], masked, x[:, :, q + 1:]), dim=2)
masked[:, t:, q + 1:] = self.mask_id
masked = rearrange(masked, 'b n q -> b (n q)')
prompt_mask = torch.full((b, t), False, device=device)
lower_quantizers_mask = torch.full((b, n, q), False, device=device)
upper_quantizers_mask = torch.full((b, n, (gq - q - 1)), True, device=device)
# upper_quantizers_mask in prompt also should be False
upper_quantizers_mask[:, :t, :] = False
mask = rearrange(torch.cat((prompt_mask, replace_mask_id_mask), dim=1), 'b n -> b n 1')
mask = torch.cat((lower_quantizers_mask, mask, upper_quantizers_mask), dim = 2)
# above is the right mask, but when computing loss, only consider level q
mask[:, :, q + 1:] = False
mask = rearrange(mask, 'b n q -> b (n q)')
# self conditioning
if self.self_cond:
self_cond = self.null_embed
if sample_prob(self.self_cond_train_prob):
with torch.no_grad():
self_cond = self.net(
masked,
cond = cond_tokens,
return_embeddings = True,
mask = seq_mask,
**kwargs
).detach()
kwargs.update(sum_embeds = self.to_self_cond(self_cond))
# logits
context = torch.no_grad if only_train_critic else nullcontext
with context():
logits = self.net(
masked,
mask = seq_mask,
cond = cond_tokens,
**kwargs
)
# cross entropy loss
loss = F.cross_entropy(
logits[mask],
orig_seq[mask]
)
if not exists(self.token_critic) or only_train_generator:
return loss, LossBreakdown(loss, None)
sampled_ids = gumbel_sample(logits, temperature = default(generator_sample_temperature, random()))
generated = torch.where(mask, sampled_ids, orig_seq)
critic_logits = self.token_critic(generated)
critic_labels = (sampled_ids != orig_seq).float()
critic_loss = F.binary_cross_entropy_with_logits(
rearrange(critic_logits, '... 1 -> ...'),
critic_labels
)
# determine losses to be returned based on what researcher wants to train
if only_train_critic:
total_loss = critic_loss
loss = None
else:
total_loss = loss + critic_loss * self.critic_loss_weight
return total_loss, LossBreakdown(loss, critic_loss)
| soundstorm-pytorch-main | soundstorm_pytorch/soundstorm.py |
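# Hedged usage sketch: a minimal example of how ConformerWrapper and SoundStorm above might be wired
# together for training on pre-quantized acoustic codes. All sizes and hyperparameters here are
# illustrative assumptions, not prescribed values.
import torch
from soundstorm_pytorch import ConformerWrapper, SoundStorm

net = ConformerWrapper(
    codebook_size = 1024,
    num_quantizers = 4,
    conformer = dict(dim = 512, depth = 2, attn_flash = False)
)

model = SoundStorm(net, steps = 18, schedule = 'cosine')

# (batch, seq, quantizers) residual VQ codes, e.g. produced by a pretrained codec
codes = torch.randint(0, 1024, (2, 256, 4))

loss, _ = model(codes)
loss.backward()

# iterative parallel decoding from scratch; without a soundstream this returns (2, 256 * 4) token ids
sampled = model.generate(num_latents = 256, batch_size = 2)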
from soundstorm_pytorch.soundstorm import (
SoundStorm,
SoundStream,
ConformerWrapper,
Conformer
)
from soundstorm_pytorch.trainer import (
SoundStormTrainer
)
| soundstorm-pytorch-main | soundstorm_pytorch/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
causal = False,
dropout = 0.,
flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.flash = flash
assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = EfficientAttentionConfig(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = EfficientAttentionConfig(False, True, True)
def get_mask(self, i, j, device):
return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1)
def flash_attn(self, q, k, v, mask = None, attn_bias = None):
_, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device
# single headed key / values
if k.ndim == 3:
k = rearrange(k, 'b n d -> b 1 n d')
if v.ndim == 3:
v = rearrange(v, 'b n d -> b 1 n d')
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
causal = self.causal
# handle attention bias
if exists(attn_bias):
mask_value = -torch.finfo(q.dtype).max // 2
causal_mask = self.get_mask(q_len, k_len, device)
attn_bias = attn_bias.masked_fill(causal_mask, mask_value)
if exists(mask):
attn_bias = attn_bias.masked_fill(~mask, mask_value)
mask = attn_bias
causal = False
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = causal
)
return out
def forward(self, q, k, v, mask = None, attn_bias = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
q_len, k_len, device = q.shape[-2], k.shape[-2], q.device
scale = q.shape[-1] ** -0.5
kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d'
if self.flash:
assert not exists(attn_bias)
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale
# attention bias
if exists(attn_bias):
sim = sim + attn_bias
# causal mask
if self.causal:
causal_mask = self.get_mask(q_len, k_len, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# key padding mask
if exists(mask):
if mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v)
return out
| soundstorm-pytorch-main | soundstorm_pytorch/attend.py |
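# Hedged usage sketch: Attend expects multi-head q, k, v of shape (batch, heads, seq, dim_head).
# With flash = False it uses the explicit einsum path above; mask is a (batch, seq) key padding mask.
# Shapes below are illustrative assumptions.
import torch
from soundstorm_pytorch.attend import Attend

attend = Attend(causal = False, dropout = 0., flash = False)

q = torch.randn(2, 8, 128, 64)
k = torch.randn(2, 8, 128, 64)
v = torch.randn(2, 8, 128, 64)
mask = torch.ones(2, 128, dtype = torch.bool)

out = attend(q, k, v, mask = mask) # (2, 8, 128, 64)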
from pathlib import Path
import re
from shutil import rmtree
from beartype import beartype
from beartype.typing import Optional
import torch
from torch import nn
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import Dataset, random_split
from audiolm_pytorch.data import get_dataloader
from audiolm_pytorch.optimizer import get_optimizer
from soundstorm_pytorch.soundstorm import SoundStorm
from accelerate import Accelerator, DistributedType
# helpers
def exists(val):
return val is not None
def noop(*args, **kwargs):
pass
def cycle(dl):
while True:
for data in dl:
yield data
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def yes_or_no(question):
answer = input(f'{question} (y/n) ')
return answer.lower() in ('yes', 'y')
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
def checkpoint_num_steps(checkpoint_path):
"""Returns the number of steps trained from a checkpoint based on the filename.
Filename format assumed to be something like "/path/to/soundstorm.20000.pt" which is
for 20k train steps. Returns 20000 in that case.
"""
results = re.findall(r'\d+', str(checkpoint_path))
if len(results) == 0:
return 0
return int(results[-1])
class SoundStormTrainer(nn.Module):
@beartype
def __init__(
self,
model: SoundStorm,
*,
num_train_steps,
num_warmup_steps,
batch_size,
dataset: Optional[Dataset] = None,
only_train_generator = False,
only_train_critic = False,
lr = 3e-4,
initial_lr = 1e-5,
grad_accum_every = 1,
wd = 0.,
max_grad_norm = 0.5,
valid_frac = 0.05,
random_split_seed = 42,
save_results_every = 100,
save_model_every = 1000,
results_folder = './results',
accelerate_kwargs: dict = dict(),
split_batches = False,
drop_last = False,
force_clear_prev_results = None
):
super().__init__()
self.accelerator = Accelerator(
split_batches = split_batches,
**accelerate_kwargs
)
self.model = model
self.register_buffer('steps', torch.Tensor([0]))
self.num_train_steps = num_train_steps
self.num_warmup_steps = num_warmup_steps
self.batch_size = batch_size
self.grad_accum_every = grad_accum_every
self.only_train_generator = only_train_generator
self.only_train_critic = only_train_critic
# optimizer
self.optim = get_optimizer(
model.parameters(),
lr = lr,
wd = wd
)
self.lr = lr
self.initial_lr = initial_lr
self.scheduler = CosineAnnealingLR(self.optim, T_max = num_train_steps)
# max grad norm
self.max_grad_norm = max_grad_norm
# create dataset
self.ds = dataset
# split for validation
if valid_frac > 0:
train_size = int((1 - valid_frac) * len(self.ds))
valid_size = len(self.ds) - train_size
self.ds, self.valid_ds = random_split(self.ds, [train_size, valid_size], generator = torch.Generator().manual_seed(random_split_seed))
            self.print(f'training with dataset of {len(self.ds)} samples and validating with randomly split {len(self.valid_ds)} samples')
else:
self.valid_ds = self.ds
self.print(f'training with shared training and valid dataset of {len(self.ds)} samples')
assert len(self.ds) >= batch_size, 'dataset must have sufficient samples for training'
assert len(self.valid_ds) >= batch_size, f'validation dataset must have sufficient number of samples (currently {len(self.valid_ds)}) for training'
# dataloader
self.dl = get_dataloader(self.ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
self.valid_dl = get_dataloader(self.valid_ds, batch_size = batch_size, shuffle = True, drop_last = drop_last)
# prepare with accelerator
(
self.model,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
) = self.accelerator.prepare(
self.model,
self.optim,
self.scheduler,
self.dl,
self.valid_dl
)
# dataloader iterators
self.dl_iter = cycle(self.dl)
self.valid_dl_iter = cycle(self.valid_dl)
self.save_model_every = save_model_every
self.save_results_every = save_results_every
self.results_folder = Path(results_folder)
        if self.is_main and (force_clear_prev_results is True or (not exists(force_clear_prev_results) and len([*self.results_folder.glob('**/*')]) > 0 and yes_or_no('do you want to clear previous experiment checkpoints and results?'))):
rmtree(str(self.results_folder))
self.results_folder.mkdir(parents = True, exist_ok = True)
        hps = {"num_train_steps": num_train_steps, "num_warmup_steps": num_warmup_steps, "learning_rate": lr, "initial_learning_rate": initial_lr}
self.accelerator.init_trackers("soundstorm", config=hps)
def save(self, path):
pkg = dict(
model = self.accelerator.get_state_dict(self.model),
optim = self.optim.state_dict(),
scheduler = self.scheduler.state_dict()
)
torch.save(pkg, path)
def load(self, path, restore_optimizer = True):
model = self.accelerator.unwrap_model(self.model)
pkg = model.load(path)
if restore_optimizer:
self.optim.load_state_dict(pkg['optim'])
self.scheduler.load_state_dict(pkg['scheduler'])
# + 1 to start from the next step and avoid overwriting the last checkpoint
self.steps = torch.tensor([checkpoint_num_steps(path) + 1], device=self.device)
def print(self, msg):
self.accelerator.print(msg)
def generate(self, *args, **kwargs):
return self.model.generate(*args, **kwargs)
@property
def device(self):
return self.accelerator.device
@property
def is_distributed(self):
return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)
@property
def is_main(self):
return self.accelerator.is_main_process
@property
def is_local_main(self):
return self.accelerator.is_local_main_process
def warmup(self, step):
if step < self.num_warmup_steps:
return self.initial_lr + (self.lr - self.initial_lr) * step / self.num_warmup_steps
else:
return self.lr
def train_step(self):
steps = int(self.steps.item())
self.model.train()
# adjust the lr according to the schedule
if steps < self.num_warmup_steps:
# Apply warmup
lr = self.warmup(steps)
for param_group in self.optim.param_groups:
param_group['lr'] = lr
else:
# After warmup period, start to apply CosineAnnealingLR
self.scheduler.step()
# logs
logs = {}
# update generator
for _ in range(self.grad_accum_every):
semantic_token_ids, acoustic_token_ids = next(self.dl_iter)
loss, loss_breakdown = self.model(
acoustic_token_ids,
cond_semantic_token_ids = semantic_token_ids,
only_train_generator = self.only_train_generator,
only_train_critic = self.only_train_critic
)
generator_loss, critic_loss = loss_breakdown
generator_loss = 0. if generator_loss is None else generator_loss
critic_loss = 0. if critic_loss is None else critic_loss
self.accelerator.backward(loss / self.grad_accum_every)
accum_log(logs, {'loss': loss.item() / self.grad_accum_every, 'generator_loss': generator_loss / self.grad_accum_every, 'critic_loss': critic_loss / self.grad_accum_every})
if exists(self.max_grad_norm):
self.accelerator.clip_grad_norm_(self.model.parameters(), self.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
# log
self.print(f"{steps}: loss: {logs['loss']:0.3f}, generator loss: {logs['generator_loss']:0.3f}, critic loss: {logs['critic_loss']:0.3f}")
self.accelerator.log({"train_loss": logs['loss']}, step=steps)
# sample results every so often
self.accelerator.wait_for_everyone()
if self.is_main and not (steps % self.save_results_every):
semantic_token_ids, acoustic_token_ids = next(self.valid_dl_iter)
with torch.inference_mode():
self.model.eval()
valid_loss, valid_loss_breakdown = self.model(acoustic_token_ids, cond_semantic_token_ids = semantic_token_ids)
valid_generator_loss, valid_critic_loss = valid_loss_breakdown
valid_generator_loss = 0. if valid_generator_loss is None else valid_generator_loss
valid_critic_loss = 0. if valid_critic_loss is None else valid_critic_loss
self.print(f'{steps}: valid loss {valid_loss:0.3f}, valid generator loss {valid_generator_loss:0.3f}, valid critic loss {valid_critic_loss:0.3f}')
self.accelerator.log({"valid_loss": valid_loss, "valid_generator_loss": valid_generator_loss, "valid_critic_loss": valid_critic_loss}, step=steps)
# save model every so often
if self.is_main and not (steps % self.save_model_every):
model_path = str(self.results_folder / f'soundstorm.{steps}.pt')
self.save(model_path)
self.print(f'{steps}: saving model to {str(self.results_folder)}')
self.steps += 1
return logs
def train(self, log_fn = noop):
while self.steps < self.num_train_steps:
logs = self.train_step()
log_fn(logs)
self.print('training complete')
| soundstorm-pytorch-main | soundstorm_pytorch/trainer.py |
from setuptools import setup, find_packages
setup(
name = 'geometric-vector-perceptron',
packages = find_packages(),
version = '0.0.14',
license='MIT',
description = 'Geometric Vector Perceptron - Pytorch',
author = 'Phil Wang, Eric Alcaide',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/geometric-vector-perceptron',
keywords = [
'artificial intelligence',
'deep learning',
'proteins',
'biomolecules',
'equivariance'
],
install_requires=[
'torch>=1.6',
'torch-scatter',
'torch-sparse',
'torch-cluster',
'torch-spline-conv',
'torch-geometric'
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| geometric-vector-perceptron-main | setup.py |
from geometric_vector_perceptron.geometric_vector_perceptron import GVP, GVPDropout, GVPLayerNorm, GVP_MPNN, GVP_Network
| geometric-vector-perceptron-main | geometric_vector_perceptron/__init__.py |
import torch
from torch import nn, einsum
from torch_geometric.nn import MessagePassing
# types
from typing import Optional, List, Union
from torch_geometric.typing import OptPairTensor, Adj, Size, OptTensor, Tensor
# helper functions
def exists(val):
return val is not None
# classes
class GVP(nn.Module):
def __init__(
self,
*,
dim_vectors_in,
dim_vectors_out,
dim_feats_in,
dim_feats_out,
feats_activation = nn.Sigmoid(),
vectors_activation = nn.Sigmoid(),
vector_gating = False
):
super().__init__()
self.dim_vectors_in = dim_vectors_in
self.dim_feats_in = dim_feats_in
self.dim_vectors_out = dim_vectors_out
dim_h = max(dim_vectors_in, dim_vectors_out)
self.Wh = nn.Parameter(torch.randn(dim_vectors_in, dim_h))
self.Wu = nn.Parameter(torch.randn(dim_h, dim_vectors_out))
self.vectors_activation = vectors_activation
self.to_feats_out = nn.Sequential(
nn.Linear(dim_h + dim_feats_in, dim_feats_out),
feats_activation
)
# branching logic to use old GVP, or GVP with vector gating
self.scalar_to_vector_gates = nn.Linear(dim_feats_out, dim_vectors_out) if vector_gating else None
def forward(self, data):
feats, vectors = data
b, n, _, v, c = *feats.shape, *vectors.shape
assert c == 3 and v == self.dim_vectors_in, 'vectors have wrong dimensions'
assert n == self.dim_feats_in, 'scalar features have wrong dimensions'
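        # Wh and Wu mix the input vector channels linearly (each output vector stays a linear
        # combination of the inputs, so rotation equivariance is preserved); the norms of Vh are
        # rotation invariant and are concatenated with the scalar features for the feed-forward branch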
Vh = einsum('b v c, v h -> b h c', vectors, self.Wh)
Vu = einsum('b h c, h u -> b u c', Vh, self.Wu)
sh = torch.norm(Vh, p = 2, dim = -1)
s = torch.cat((feats, sh), dim = 1)
feats_out = self.to_feats_out(s)
if exists(self.scalar_to_vector_gates):
gating = self.scalar_to_vector_gates(feats_out)
gating = gating.unsqueeze(dim = -1)
else:
gating = torch.norm(Vu, p = 2, dim = -1, keepdim = True)
vectors_out = self.vectors_activation(gating) * Vu
return (feats_out, vectors_out)
class GVPDropout(nn.Module):
""" Separate dropout for scalars and vectors. """
def __init__(self, rate):
super().__init__()
self.vector_dropout = nn.Dropout2d(rate)
self.feat_dropout = nn.Dropout(rate)
def forward(self, feats, vectors):
return self.feat_dropout(feats), self.vector_dropout(vectors)
class GVPLayerNorm(nn.Module):
""" Normal layer norm for scalars, nontrainable norm for vectors. """
def __init__(self, feats_h_size, eps = 1e-8):
super().__init__()
self.eps = eps
self.feat_norm = nn.LayerNorm(feats_h_size)
def forward(self, feats, vectors):
vector_norm = vectors.norm(dim=(-1,-2), keepdim=True)
normed_feats = self.feat_norm(feats)
normed_vectors = vectors / (vector_norm + self.eps)
return normed_feats, normed_vectors
class GVP_MPNN(MessagePassing):
r"""The Geometric Vector Perceptron message passing layer
introduced in https://openreview.net/forum?id=1YLJDvSx6J4.
Uses a Geometric Vector Perceptron instead of the normal
MLP in aggregation phase.
Inputs will be a concatenation of (vectors, features)
Args:
* feats_x_in: int. number of scalar dimensions in the x inputs.
* vectors_x_in: int. number of vector dimensions in the x inputs.
* feats_x_out: int. number of scalar dimensions in the x outputs.
* vectors_x_out: int. number of vector dimensions in the x outputs.
* feats_edge_in: int. number of scalar dimensions in the edge_attr inputs.
* vectors_edge_in: int. number of vector dimensions in the edge_attr inputs.
* feats_edge_out: int. number of scalar dimensions in the edge_attr outputs.
* vectors_edge_out: int. number of vector dimensions in the edge_attr outputs.
* dropout: float. dropout rate.
* vector_dim: int. dimensions of the space containing the vectors.
* verbose: bool. verbosity level.
"""
def __init__(self, feats_x_in, vectors_x_in,
feats_x_out, vectors_x_out,
feats_edge_in, vectors_edge_in,
feats_edge_out, vectors_edge_out,
dropout, residual=False, vector_dim=3,
verbose=False, **kwargs):
super(GVP_MPNN, self).__init__(aggr="mean",**kwargs)
self.verbose = verbose
# record x dimensions ( vector + scalars )
self.feats_x_in = feats_x_in
self.vectors_x_in = vectors_x_in # N vectors features in input
self.feats_x_out = feats_x_out
self.vectors_x_out = vectors_x_out # N vectors features in output
# record edge_attr dimensions ( vector + scalars )
self.feats_edge_in = feats_edge_in
self.vectors_edge_in = vectors_edge_in # N vectors features in input
self.feats_edge_out = feats_edge_out
self.vectors_edge_out = vectors_edge_out # N vectors features in output
# aux layers
self.vector_dim = vector_dim
self.norm = nn.ModuleList([GVPLayerNorm(self.feats_x_out), # + self.feats_edge_out
GVPLayerNorm(self.feats_x_out)])
self.dropout = GVPDropout(dropout)
self.residual = residual
# this receives the vec_in message AND the receiver node
self.W_EV = nn.Sequential(GVP(
dim_vectors_in = self.vectors_x_in + self.vectors_edge_in,
dim_vectors_out = self.vectors_x_out + self.feats_edge_out,
dim_feats_in = self.feats_x_in + self.feats_edge_in,
dim_feats_out = self.feats_x_out + self.feats_edge_out
),
GVP(
dim_vectors_in = self.vectors_x_out + self.feats_edge_out,
dim_vectors_out = self.vectors_x_out + self.feats_edge_out,
dim_feats_in = self.feats_x_out + self.feats_edge_out,
dim_feats_out = self.feats_x_out + self.feats_edge_out
),
GVP(
dim_vectors_in = self.vectors_x_out + self.feats_edge_out,
dim_vectors_out = self.vectors_x_out + self.feats_edge_out,
dim_feats_in = self.feats_x_out + self.feats_edge_out,
dim_feats_out = self.feats_x_out + self.feats_edge_out
))
self.W_dh = nn.Sequential(GVP(
dim_vectors_in = self.vectors_x_out,
dim_vectors_out = 2*self.vectors_x_out,
dim_feats_in = self.feats_x_out,
dim_feats_out = 4*self.feats_x_out
),
GVP(
dim_vectors_in = 2*self.vectors_x_out,
dim_vectors_out = self.vectors_x_out,
dim_feats_in = 4*self.feats_x_out,
dim_feats_out = self.feats_x_out
))
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
edge_attr: OptTensor = None, size: Size = None) -> Tensor:
""""""
x_size = list(x.shape)[-1]
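        # node features arrive packed as [flattened vectors (vectors_x_in * vector_dim) | scalar feats],
        # and edge_attr follows the same layout; propagate() splits them, runs the GVP stack on the
        # messages and aggregates the scalar and vector parts separately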
# aggregate feats and vectors separately
feats, vectors = self.propagate(edge_index, x=x, edge_attr=edge_attr)
# aggregate
feats, vectors = self.dropout(feats, vectors.reshape(vectors.shape[0], -1, self.vector_dim))
# get the information relative to the nodes - edges not returned
feats_nodes = feats[:, :self.feats_x_in]
vector_nodes = vectors[:, :self.vectors_x_in]
# reshapes the vector part to last 3d
x_vectors = x[:, :self.vectors_x_in * self.vector_dim].reshape(x.shape[0], -1, self.vector_dim)
feats, vectors = self.norm[0]( x[:, self.vectors_x_in * self.vector_dim:]+feats_nodes, x_vectors+vector_nodes )
# update position-wise feedforward
feats_, vectors_ = self.dropout( *self.W_dh( (feats, vectors) ) )
feats, vectors = self.norm[1]( feats+feats_, vectors+vectors_ )
# make it residual
        # keep the [vectors | scalars] layout consistent with the input so the residual and stacked layers line up
        new_x = torch.cat( [vectors.flatten(start_dim=-2), feats], dim=-1 )
if self.residual:
return new_x + x
return new_x
def message(self, x_j, edge_attr) -> Tensor:
feats = torch.cat([ x_j[:, self.vectors_x_in * self.vector_dim:],
edge_attr[:, self.vectors_edge_in * self.vector_dim:] ], dim=-1)
vectors = torch.cat([ x_j[:, :self.vectors_x_in * self.vector_dim],
edge_attr[:, :self.vectors_edge_in * self.vector_dim] ], dim=-1).reshape(x_j.shape[0],-1,self.vector_dim)
feats, vectors = self.W_EV( (feats, vectors) )
return feats, vectors.flatten(start_dim=-2)
def propagate(self, edge_index: Adj, size: Size = None, **kwargs):
r"""The initial call to start propagating messages.
Args:
adj (Tensor or SparseTensor): `edge_index` holds the indices of a general (sparse)
assignment matrix of shape :obj:`[N, M]`.
size (tuple, optional): If set to :obj:`None`, the size will be automatically inferred
and assumed to be quadratic.
This argument is ignored in case :obj:`edge_index` is a
:obj:`torch_sparse.SparseTensor`. (default: :obj:`None`)
**kwargs: Any additional data which is needed to construct and
aggregate messages, and to update node embeddings.
"""
size = self.__check_input__(edge_index, size)
coll_dict = self.__collect__(self.__user_args__,
edge_index, size, kwargs)
msg_kwargs = self.inspector.distribute('message', coll_dict)
feats, vectors = self.message(**msg_kwargs)
# aggregate them
aggr_kwargs = self.inspector.distribute('aggregate', coll_dict)
out_feats = self.aggregate(feats, **aggr_kwargs)
out_vectors = self.aggregate(vectors, **aggr_kwargs)
# return tuple
update_kwargs = self.inspector.distribute('update', coll_dict)
return self.update((out_feats, out_vectors), **update_kwargs)
def __repr__(self):
dict_print = { "feats_x_in": self.feats_x_in,
"vectors_x_in": self.vectors_x_in,
"feats_x_out": self.feats_x_out,
"vectors_x_out": self.vectors_x_out,
"feats_edge_in": self.feats_edge_in,
"vectors_edge_in": self.vectors_edge_in,
"feats_edge_out": self.feats_edge_out,
"vectors_edge_out": self.vectors_edge_out,
"vector_dim": self.vector_dim }
return 'GVP_MPNN Layer with the following attributes: ' + str(dict_print)
class GVP_Network(nn.Module):
r"""Sample GNN model architecture that uses the Geometric Vector Perceptron
message passing layer to learn over point clouds.
Main MPNN layer introduced in https://openreview.net/forum?id=1YLJDvSx6J4.
Inputs will be standard GNN: x, edge_index, edge_attr, batch, ...
Args:
* n_layers: int. number of MPNN layers
* feats_x_in: int. number of scalar dimensions in the x inputs.
* vectors_x_in: int. number of vector dimensions in the x inputs.
* feats_x_out: int. number of scalar dimensions in the x outputs.
* vectors_x_out: int. number of vector dimensions in the x outputs.
* feats_edge_in: int. number of scalar dimensions in the edge_attr inputs.
* vectors_edge_in: int. number of vector dimensions in the edge_attr inputs.
* feats_edge_out: int. number of scalar dimensions in the edge_attr outputs.
        * embedding_nums: list. number of unique keys to embed for points.
1 entry per embedding needed.
* embedding_dims: list. point - number of dimensions of
the resulting embedding. 1 entry per embedding needed.
        * edge_embedding_nums: list. number of unique keys to embed for edges.
1 entry per embedding needed.
* edge_embedding_dims: list. point - number of dimensions of
the resulting embedding. 1 entry per embedding needed.
* vectors_edge_out: int. number of vector dimensions in the edge_attr outputs.
* dropout: float. dropout rate.
* vector_dim: int. dimensions of the space containing the vectors.
* recalc: bool. Whether to recalculate edge features between MPNN layers.
* verbose: bool. verbosity level.
"""
def __init__(self, n_layers,
feats_x_in, vectors_x_in,
feats_x_out, vectors_x_out,
feats_edge_in, vectors_edge_in,
feats_edge_out, vectors_edge_out,
embedding_nums=[], embedding_dims=[],
edge_embedding_nums=[], edge_embedding_dims=[],
dropout=0.0, residual=False, vector_dim=3,
recalc=1, verbose=False):
super().__init__()
self.n_layers = n_layers
# Embeddings? solve here
self.embedding_nums = embedding_nums
self.embedding_dims = embedding_dims
self.emb_layers = torch.nn.ModuleList()
self.edge_embedding_nums = edge_embedding_nums
self.edge_embedding_dims = edge_embedding_dims
self.edge_emb_layers = torch.nn.ModuleList()
# instantiate point and edge embedding layers
for i in range( len(self.embedding_dims) ):
self.emb_layers.append(nn.Embedding(num_embeddings = embedding_nums[i],
embedding_dim = embedding_dims[i]))
feats_x_in += embedding_dims[i] - 1
feats_x_out += embedding_dims[i] - 1
for i in range( len(self.edge_embedding_dims) ):
self.edge_emb_layers.append(nn.Embedding(num_embeddings = edge_embedding_nums[i],
embedding_dim = edge_embedding_dims[i]))
feats_edge_in += edge_embedding_dims[i] - 1
feats_edge_out += edge_embedding_dims[i] - 1
# rest
self.fc_layers = torch.nn.ModuleList()
self.gcnn_layers = torch.nn.ModuleList()
self.feats_x_in = feats_x_in
self.vectors_x_in = vectors_x_in
self.feats_x_out = feats_x_out
self.vectors_x_out = vectors_x_out
self.feats_edge_in = feats_edge_in
self.vectors_edge_in = vectors_edge_in
self.feats_edge_out = feats_edge_out
self.vectors_edge_out = vectors_edge_out
self.dropout = dropout
self.residual = residual
self.vector_dim = vector_dim
self.recalc = recalc
self.verbose = verbose
# instantiate layers
for i in range(n_layers):
layer = GVP_MPNN(feats_x_in, vectors_x_in,
feats_x_out, vectors_x_out,
feats_edge_in, vectors_edge_in,
feats_edge_out, vectors_edge_out,
dropout, residual=residual,
vector_dim=vector_dim, verbose=verbose)
self.gcnn_layers.append(layer)
def forward(self, x, edge_index, batch, edge_attr,
bsize=None, recalc_edge=None, verbose=0):
""" Embedding of inputs when necessary, then pass layers.
Recalculate edge features every time with the
`recalc_edge` function.
"""
original_x = x.clone()
original_edge_index = edge_index.clone()
original_edge_attr = edge_attr.clone()
# do embeddings when needed
# pick to embedd. embedd sequentially and add to input
# points:
to_embedd = x[:, -len(self.embedding_dims):].long()
for i,emb_layer in enumerate(self.emb_layers):
# the portion corresponding to `to_embedd` part gets dropped
# at first iter
stop_concat = -len(self.embedding_dims) if i == 0 else x.shape[-1]
x = torch.cat([ x[:, :stop_concat],
emb_layer( to_embedd[:, i] )
], dim=-1)
# pass layers
for i,layer in enumerate(self.gcnn_layers):
# embedd edge items (needed everytime since edge_attr and idxs
# are recalculated every pass)
to_embedd = edge_attr[:, -len(self.edge_embedding_dims):].long()
for j,edge_emb_layer in enumerate(self.edge_emb_layers):
# the portion corresponding to `to_embedd` part gets dropped
# at first iter
stop_concat = -len(self.edge_embedding_dims) if j == 0 else x.shape[-1]
edge_attr = torch.cat([ edge_attr[:, :-len(self.edge_embedding_dims) + j],
edge_emb_layer( to_embedd[:, j] )
], dim=-1)
# pass layers
x = layer(x, edge_index, edge_attr, size=bsize)
# recalculate edge info every self.recalc steps
# but not needed if last layer of last iteration
            if (i % self.recalc == 0) and not (i == self.n_layers - 1):
edge_index, edge_attr, _ = recalc_edge(x) # returns attr, idx, embedd_info
else:
edge_attr = original_edge_attr.clone()
edge_index = original_edge_index.clone()
if verbose:
print("========")
print("iter:", j, "layer:", i, "nlinks:", edge_attr.shape)
return x
def __repr__(self):
return 'GVP_Network of: {0} layers'.format(len(self.gcnn_layers))
| geometric-vector-perceptron-main | geometric_vector_perceptron/geometric_vector_perceptron.py |
import torch
from geometric_vector_perceptron import GVP, GVPDropout, GVPLayerNorm, GVP_MPNN
TOL = 1e-2
def random_rotation():
q, r = torch.qr(torch.randn(3, 3))
return q
def diff_matrix(vectors):
b, _, d = vectors.shape
diff = vectors[..., None, :] - vectors[:, None, ...]
return diff.reshape(b, -1, d)
def test_equivariance():
R = random_rotation()
model = GVP(
dim_vectors_in = 1024,
dim_feats_in = 512,
dim_vectors_out = 256,
dim_feats_out = 512
)
feats = torch.randn(1, 512)
vectors = torch.randn(1, 32, 3)
feats_out, vectors_out = model( (feats, diff_matrix(vectors)) )
feats_out_r, vectors_out_r = model( (feats, diff_matrix(vectors @ R)) )
err = ((vectors_out @ R) - vectors_out_r).max()
assert err < TOL, 'equivariance must be respected'
def test_all_layer_types():
R = random_rotation()
model = GVP(
dim_vectors_in = 1024,
dim_feats_in = 512,
dim_vectors_out = 256,
dim_feats_out = 512
)
dropout = GVPDropout(0.2)
layer_norm = GVPLayerNorm(512)
feats = torch.randn(1, 512)
message = torch.randn(1, 512)
vectors = torch.randn(1, 32, 3)
# GVP layer
feats_out, vectors_out = model( (feats, diff_matrix(vectors)) )
assert list(feats_out.shape) == [1, 512] and list(vectors_out.shape) == [1, 256, 3]
# GVP Dropout
feats_out, vectors_out = dropout(feats_out, vectors_out)
assert list(feats_out.shape) == [1, 512] and list(vectors_out.shape) == [1, 256, 3]
# GVP Layer Norm
feats_out, vectors_out = layer_norm(feats_out, vectors_out)
assert list(feats_out.shape) == [1, 512] and list(vectors_out.shape) == [1, 256, 3]
def test_mpnn():
# input data
x = torch.randn(5, 32)
edge_idx = torch.tensor([[0,2,3,4,1], [1,1,3,3,4]]).long()
edge_attr = torch.randn(5, 16)
# nodes (8 scalars and 8 vectors) || edges (4 scalars and 3 vectors)
dropout = 0.1
# define layer
gvp_mpnn = GVP_MPNN(feats_x_in = 8,
vectors_x_in = 8,
feats_x_out = 8,
vectors_x_out = 8,
feats_edge_in = 4,
vectors_edge_in = 4,
feats_edge_out = 4,
vectors_edge_out = 4,
                         dropout=dropout )
x_out = gvp_mpnn(x, edge_idx, edge_attr)
assert x.shape == x_out.shape, "Input and output shapes don't match"
if __name__ == "__main__":
test_equivariance()
test_all_layer_types()
test_mpnn()
| geometric-vector-perceptron-main | tests/tests.py |
# Author: Eric Alcaide
# A substantial part has been borrowed from
# https://github.com/jonathanking/sidechainnet
#
# Here's the License for it:
#
# Copyright 2020 Jonathan King
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
warnings.filterwarnings("ignore")
import torch
import numpy as np
from einops import repeat, rearrange
######################
## structural utils ##
######################
def get_dihedral(c1, c2, c3, c4):
""" Returns the dihedral angle in radians.
Will use atan2 formula from:
https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
Inputs:
* c1: (batch, 3) or (3,)
* c2: (batch, 3) or (3,)
* c3: (batch, 3) or (3,)
* c4: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
u3 = c4 - c3
return torch.atan2( ( (torch.norm(u2, dim=-1, keepdim=True) * u1) * torch.cross(u2,u3, dim=-1) ).sum(dim=-1) ,
( torch.cross(u1,u2, dim=-1) * torch.cross(u2, u3, dim=-1) ).sum(dim=-1) )
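# Quick usage sketch (editor's addition, example coordinates are illustrative only):
#   c1 = torch.tensor([1., 0., 0.]); c2 = torch.tensor([0., 0., 0.])
#   c3 = torch.tensor([0., 1., 0.]); c4 = torch.tensor([0., 1., 1.])
#   get_dihedral(c1, c2, c3, c4)   # ~ -pi/2 for this geometry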
def get_angle(c1, c2, c3):
""" Returns the angle in radians.
Inputs:
* c1: (batch, 3) or (3,)
* c2: (batch, 3) or (3,)
* c3: (batch, 3) or (3,)
"""
u1 = c2 - c1
u2 = c3 - c2
# don't use trad arccos since it gets the "smallest angle",
# not necessarily the one we want
# return torch.acos( (u1*u2).sum(dim=-1) / (torch.norm(u1, dim=-1)*torch.norm(u2, dim=-1)) )+
# better use atan2 formula: atan2(cross, dot) from here:
# https://johnblackburne.blogspot.com/2012/05/angle-between-two-3d-vectors.html
# add a minus since we want the angle in reversed order - sidechainnet issues
return torch.atan2( torch.norm(torch.cross(u1,u2, dim=-1), dim=-1),
-(u1*u2).sum(dim=-1) )
def kabsch_torch(X, Y):
""" Kabsch alignment of X into Y.
Assumes X,Y are both (D, N) - usually (3, N)
"""
# center X and Y to the origin
X_ = X - X.mean(dim=-1, keepdim=True)
Y_ = Y - Y.mean(dim=-1, keepdim=True)
    # calculate covariance matrix
C = torch.matmul(X_, Y_.t())
# Optimal rotation matrix via SVD - warning! W must be transposed
V, S, W = torch.svd(C.detach())
# determinant sign for direction correction
d = (torch.det(V) * torch.det(W)) < 0.0
if d:
S[-1] = S[-1] * (-1)
V[:, -1] = V[:, -1] * (-1)
# Create Rotation matrix U
U = torch.matmul(V, W.t())
# calculate rotations
X_ = torch.matmul(X_.t(), U).t()
# return centered and aligned
return X_, Y_
def rmsd_torch(X, Y):
""" Assumes x,y are both (batch, d, n) - usually (batch, 3, N). """
return torch.sqrt( torch.mean((X - Y)**2, axis=(-1, -2)) )
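# Editor's sketch (not part of the original API): sanity-check the Kabsch alignment
# by undoing a random rotation; the RMSD after alignment should be ~0.
def _example_kabsch_rmsd():
    pts = torch.randn(3, 10)                        # (D, N) as expected by kabsch_torch
    q, _ = torch.qr(torch.randn(3, 3))              # random orthogonal matrix
    if torch.det(q) < 0:
        q[:, 0] = -q[:, 0]                          # make it a proper rotation (det = +1)
    aligned, target = kabsch_torch(q @ pts, pts)    # both returned centered at the origin
    return rmsd_torch(aligned[None], target[None])  # ~0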
############
### INFO ###
############
SC_BUILD_INFO = {
'A': {
'angles-names': ['N-CA-CB'],
'angles-types': ['N -CX-CT'],
'angles-vals': [1.9146261894377796],
'atom-names': ['CB'],
'bonds-names': ['CA-CB'],
'bonds-types': ['CX-CT'],
'bonds-vals': [1.526],
'torsion-names': ['C-N-CA-CB'],
'torsion-types': ['C -N -CX-CT'],
'torsion-vals': ['p']
},
'R': {
'angles-names': [
'N-CA-CB', 'CA-CB-CG', 'CB-CG-CD', 'CG-CD-NE', 'CD-NE-CZ', 'NE-CZ-NH1',
'NE-CZ-NH2'
],
'angles-types': [
'N -CX-C8', 'CX-C8-C8', 'C8-C8-C8', 'C8-C8-N2', 'C8-N2-CA', 'N2-CA-N2',
'N2-CA-N2'
],
'angles-vals': [
1.9146261894377796, 1.911135530933791, 1.911135530933791, 1.9408061282176945,
2.150245638457014, 2.0943951023931953, 2.0943951023931953
],
'atom-names': ['CB', 'CG', 'CD', 'NE', 'CZ', 'NH1', 'NH2'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-CD', 'CD-NE', 'NE-CZ', 'CZ-NH1', 'CZ-NH2'],
'bonds-types': ['CX-C8', 'C8-C8', 'C8-C8', 'C8-N2', 'N2-CA', 'CA-N2', 'CA-N2'],
'bonds-vals': [1.526, 1.526, 1.526, 1.463, 1.34, 1.34, 1.34],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD', 'CB-CG-CD-NE', 'CG-CD-NE-CZ',
'CD-NE-CZ-NH1', 'CD-NE-CZ-NH2'
],
'torsion-types': [
'C -N -CX-C8', 'N -CX-C8-C8', 'CX-C8-C8-C8', 'C8-C8-C8-N2', 'C8-C8-N2-CA',
'C8-N2-CA-N2', 'C8-N2-CA-N2'
],
'torsion-vals': ['p', 'p', 'p', 'p', 'p', 'p', 'i']
},
'N': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-OD1', 'CB-CG-ND2'],
'angles-types': ['N -CX-2C', 'CX-2C-C ', '2C-C -O ', '2C-C -N '],
'angles-vals': [
1.9146261894377796, 1.9390607989657, 2.101376419401173, 2.035053907825388
],
'atom-names': ['CB', 'CG', 'OD1', 'ND2'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-OD1', 'CG-ND2'],
'bonds-types': ['CX-2C', '2C-C ', 'C -O ', 'C -N '],
'bonds-vals': [1.526, 1.522, 1.229, 1.335],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-OD1', 'CA-CB-CG-ND2'],
'torsion-types': ['C -N -CX-2C', 'N -CX-2C-C ', 'CX-2C-C -O ', 'CX-2C-C -N '],
'torsion-vals': ['p', 'p', 'p', 'i']
},
'D': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-OD1', 'CB-CG-OD2'],
'angles-types': ['N -CX-2C', 'CX-2C-CO', '2C-CO-O2', '2C-CO-O2'],
'angles-vals': [
1.9146261894377796, 1.9390607989657, 2.0420352248333655, 2.0420352248333655
],
'atom-names': ['CB', 'CG', 'OD1', 'OD2'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-OD1', 'CG-OD2'],
'bonds-types': ['CX-2C', '2C-CO', 'CO-O2', 'CO-O2'],
'bonds-vals': [1.526, 1.522, 1.25, 1.25],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-OD1', 'CA-CB-CG-OD2'],
'torsion-types': ['C -N -CX-2C', 'N -CX-2C-CO', 'CX-2C-CO-O2', 'CX-2C-CO-O2'],
'torsion-vals': ['p', 'p', 'p', 'i']
},
'C': {
'angles-names': ['N-CA-CB', 'CA-CB-SG'],
'angles-types': ['N -CX-2C', 'CX-2C-SH'],
'angles-vals': [1.9146261894377796, 1.8954275676658419],
'atom-names': ['CB', 'SG'],
'bonds-names': ['CA-CB', 'CB-SG'],
'bonds-types': ['CX-2C', '2C-SH'],
'bonds-vals': [1.526, 1.81],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-SG'],
'torsion-types': ['C -N -CX-2C', 'N -CX-2C-SH'],
'torsion-vals': ['p', 'p']
},
'Q': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-CD', 'CG-CD-OE1', 'CG-CD-NE2'],
'angles-types': ['N -CX-2C', 'CX-2C-2C', '2C-2C-C ', '2C-C -O ', '2C-C -N '],
'angles-vals': [
1.9146261894377796, 1.911135530933791, 1.9390607989657, 2.101376419401173,
2.035053907825388
],
'atom-names': ['CB', 'CG', 'CD', 'OE1', 'NE2'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-CD', 'CD-OE1', 'CD-NE2'],
'bonds-types': ['CX-2C', '2C-2C', '2C-C ', 'C -O ', 'C -N '],
'bonds-vals': [1.526, 1.526, 1.522, 1.229, 1.335],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD', 'CB-CG-CD-OE1', 'CB-CG-CD-NE2'
],
'torsion-types': [
'C -N -CX-2C', 'N -CX-2C-2C', 'CX-2C-2C-C ', '2C-2C-C -O ', '2C-2C-C -N '
],
'torsion-vals': ['p', 'p', 'p', 'p', 'i']
},
'E': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-CD', 'CG-CD-OE1', 'CG-CD-OE2'],
'angles-types': ['N -CX-2C', 'CX-2C-2C', '2C-2C-CO', '2C-CO-O2', '2C-CO-O2'],
'angles-vals': [
1.9146261894377796, 1.911135530933791, 1.9390607989657, 2.0420352248333655,
2.0420352248333655
],
'atom-names': ['CB', 'CG', 'CD', 'OE1', 'OE2'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-CD', 'CD-OE1', 'CD-OE2'],
'bonds-types': ['CX-2C', '2C-2C', '2C-CO', 'CO-O2', 'CO-O2'],
'bonds-vals': [1.526, 1.526, 1.522, 1.25, 1.25],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD', 'CB-CG-CD-OE1', 'CB-CG-CD-OE2'
],
'torsion-types': [
'C -N -CX-2C', 'N -CX-2C-2C', 'CX-2C-2C-CO', '2C-2C-CO-O2', '2C-2C-CO-O2'
],
'torsion-vals': ['p', 'p', 'p', 'p', 'i']
},
'G': {
'angles-names': [],
'angles-types': [],
'angles-vals': [],
'atom-names': [],
'bonds-names': [],
'bonds-types': [],
'bonds-vals': [],
'torsion-names': [],
'torsion-types': [],
'torsion-vals': []
},
'H': {
'angles-names': [
'N-CA-CB', 'CA-CB-CG', 'CB-CG-ND1', 'CG-ND1-CE1', 'ND1-CE1-NE2', 'CE1-NE2-CD2'
],
'angles-types': [
'N -CX-CT', 'CX-CT-CC', 'CT-CC-NA', 'CC-NA-CR', 'NA-CR-NB', 'CR-NB-CV'
],
'angles-vals': [
1.9146261894377796, 1.9739673840055867, 2.0943951023931953,
1.8849555921538759, 1.8849555921538759, 1.8849555921538759
],
'atom-names': ['CB', 'CG', 'ND1', 'CE1', 'NE2', 'CD2'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-ND1', 'ND1-CE1', 'CE1-NE2', 'NE2-CD2'],
'bonds-types': ['CX-CT', 'CT-CC', 'CC-NA', 'NA-CR', 'CR-NB', 'NB-CV'],
'bonds-vals': [1.526, 1.504, 1.385, 1.343, 1.335, 1.394],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-ND1', 'CB-CG-ND1-CE1', 'CG-ND1-CE1-NE2',
'ND1-CE1-NE2-CD2'
],
'torsion-types': [
'C -N -CX-CT', 'N -CX-CT-CC', 'CX-CT-CC-NA', 'CT-CC-NA-CR', 'CC-NA-CR-NB',
'NA-CR-NB-CV'
],
'torsion-vals': ['p', 'p', 'p', 3.141592653589793, 0.0, 0.0]
},
'I': {
'angles-names': ['N-CA-CB', 'CA-CB-CG1', 'CB-CG1-CD1', 'CA-CB-CG2'],
'angles-types': ['N -CX-3C', 'CX-3C-2C', '3C-2C-CT', 'CX-3C-CT'],
'angles-vals': [
1.9146261894377796, 1.911135530933791, 1.911135530933791, 1.911135530933791
],
'atom-names': ['CB', 'CG1', 'CD1', 'CG2'],
'bonds-names': ['CA-CB', 'CB-CG1', 'CG1-CD1', 'CB-CG2'],
'bonds-types': ['CX-3C', '3C-2C', '2C-CT', '3C-CT'],
'bonds-vals': [1.526, 1.526, 1.526, 1.526],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-CG1', 'CA-CB-CG1-CD1', 'N-CA-CB-CG2'],
'torsion-types': ['C -N -CX-3C', 'N -CX-3C-2C', 'CX-3C-2C-CT', 'N -CX-3C-CT'],
'torsion-vals': ['p', 'p', 'p', 'p']
},
'L': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-CD1', 'CB-CG-CD2'],
'angles-types': ['N -CX-2C', 'CX-2C-3C', '2C-3C-CT', '2C-3C-CT'],
'angles-vals': [
1.9146261894377796, 1.911135530933791, 1.911135530933791, 1.911135530933791
],
'atom-names': ['CB', 'CG', 'CD1', 'CD2'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-CD1', 'CG-CD2'],
'bonds-types': ['CX-2C', '2C-3C', '3C-CT', '3C-CT'],
'bonds-vals': [1.526, 1.526, 1.526, 1.526],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD1', 'CA-CB-CG-CD2'],
'torsion-types': ['C -N -CX-2C', 'N -CX-2C-3C', 'CX-2C-3C-CT', 'CX-2C-3C-CT'],
'torsion-vals': ['p', 'p', 'p', 'p']
},
'K': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-CD', 'CG-CD-CE', 'CD-CE-NZ'],
'angles-types': ['N -CX-C8', 'CX-C8-C8', 'C8-C8-C8', 'C8-C8-C8', 'C8-C8-N3'],
'angles-vals': [
1.9146261894377796, 1.911135530933791, 1.911135530933791, 1.911135530933791,
1.9408061282176945
],
'atom-names': ['CB', 'CG', 'CD', 'CE', 'NZ'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-CD', 'CD-CE', 'CE-NZ'],
'bonds-types': ['CX-C8', 'C8-C8', 'C8-C8', 'C8-C8', 'C8-N3'],
'bonds-vals': [1.526, 1.526, 1.526, 1.526, 1.471],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD', 'CB-CG-CD-CE', 'CG-CD-CE-NZ'
],
'torsion-types': [
'C -N -CX-C8', 'N -CX-C8-C8', 'CX-C8-C8-C8', 'C8-C8-C8-C8', 'C8-C8-C8-N3'
],
'torsion-vals': ['p', 'p', 'p', 'p', 'p']
},
'M': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-SD', 'CG-SD-CE'],
'angles-types': ['N -CX-2C', 'CX-2C-2C', '2C-2C-S ', '2C-S -CT'],
'angles-vals': [
1.9146261894377796, 1.911135530933791, 2.0018926520374962, 1.726130630222392
],
'atom-names': ['CB', 'CG', 'SD', 'CE'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-SD', 'SD-CE'],
'bonds-types': ['CX-2C', '2C-2C', '2C-S ', 'S -CT'],
'bonds-vals': [1.526, 1.526, 1.81, 1.81],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-SD', 'CB-CG-SD-CE'],
'torsion-types': ['C -N -CX-2C', 'N -CX-2C-2C', 'CX-2C-2C-S ', '2C-2C-S -CT'],
'torsion-vals': ['p', 'p', 'p', 'p']
},
'F': {
'angles-names': [
'N-CA-CB', 'CA-CB-CG', 'CB-CG-CD1', 'CG-CD1-CE1', 'CD1-CE1-CZ', 'CE1-CZ-CE2',
'CZ-CE2-CD2'
],
'angles-types': [
'N -CX-CT', 'CX-CT-CA', 'CT-CA-CA', 'CA-CA-CA', 'CA-CA-CA', 'CA-CA-CA',
'CA-CA-CA'
],
'angles-vals': [
1.9146261894377796, 1.9896753472735358, 2.0943951023931953,
2.0943951023931953, 2.0943951023931953, 2.0943951023931953, 2.0943951023931953
],
'atom-names': ['CB', 'CG', 'CD1', 'CE1', 'CZ', 'CE2', 'CD2'],
'bonds-names': [
'CA-CB', 'CB-CG', 'CG-CD1', 'CD1-CE1', 'CE1-CZ', 'CZ-CE2', 'CE2-CD2'
],
'bonds-types': ['CX-CT', 'CT-CA', 'CA-CA', 'CA-CA', 'CA-CA', 'CA-CA', 'CA-CA'],
'bonds-vals': [1.526, 1.51, 1.4, 1.4, 1.4, 1.4, 1.4],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD1', 'CB-CG-CD1-CE1', 'CG-CD1-CE1-CZ',
'CD1-CE1-CZ-CE2', 'CE1-CZ-CE2-CD2'
],
'torsion-types': [
'C -N -CX-CT', 'N -CX-CT-CA', 'CX-CT-CA-CA', 'CT-CA-CA-CA', 'CA-CA-CA-CA',
'CA-CA-CA-CA', 'CA-CA-CA-CA'
],
'torsion-vals': ['p', 'p', 'p', 3.141592653589793, 0.0, 0.0, 0.0]
},
'P': {
'angles-names': ['N-CA-CB', 'CA-CB-CG', 'CB-CG-CD'],
'angles-types': ['N -CX-CT', 'CX-CT-CT', 'CT-CT-CT'],
'angles-vals': [1.9146261894377796, 1.911135530933791, 1.911135530933791],
'atom-names': ['CB', 'CG', 'CD'],
'bonds-names': ['CA-CB', 'CB-CG', 'CG-CD'],
'bonds-types': ['CX-CT', 'CT-CT', 'CT-CT'],
'bonds-vals': [1.526, 1.526, 1.526],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD'],
'torsion-types': ['C -N -CX-CT', 'N -CX-CT-CT', 'CX-CT-CT-CT'],
'torsion-vals': ['p', 'p', 'p']
},
'S': {
'angles-names': ['N-CA-CB', 'CA-CB-OG'],
'angles-types': ['N -CX-2C', 'CX-2C-OH'],
'angles-vals': [1.9146261894377796, 1.911135530933791],
'atom-names': ['CB', 'OG'],
'bonds-names': ['CA-CB', 'CB-OG'],
'bonds-types': ['CX-2C', '2C-OH'],
'bonds-vals': [1.526, 1.41],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-OG'],
'torsion-types': ['C -N -CX-2C', 'N -CX-2C-OH'],
'torsion-vals': ['p', 'p']
},
'T': {
'angles-names': ['N-CA-CB', 'CA-CB-OG1', 'CA-CB-CG2'],
'angles-types': ['N -CX-3C', 'CX-3C-OH', 'CX-3C-CT'],
'angles-vals': [1.9146261894377796, 1.911135530933791, 1.911135530933791],
'atom-names': ['CB', 'OG1', 'CG2'],
'bonds-names': ['CA-CB', 'CB-OG1', 'CB-CG2'],
'bonds-types': ['CX-3C', '3C-OH', '3C-CT'],
'bonds-vals': [1.526, 1.41, 1.526],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-OG1', 'N-CA-CB-CG2'],
'torsion-types': ['C -N -CX-3C', 'N -CX-3C-OH', 'N -CX-3C-CT'],
'torsion-vals': ['p', 'p', 'p']
},
'W': {
'angles-names': [
'N-CA-CB', 'CA-CB-CG', 'CB-CG-CD1', 'CG-CD1-NE1', 'CD1-NE1-CE2',
'NE1-CE2-CZ2', 'CE2-CZ2-CH2', 'CZ2-CH2-CZ3', 'CH2-CZ3-CE3', 'CZ3-CE3-CD2'
],
'angles-types': [
'N -CX-CT', 'CX-CT-C*', 'CT-C*-CW', 'C*-CW-NA', 'CW-NA-CN', 'NA-CN-CA',
'CN-CA-CA', 'CA-CA-CA', 'CA-CA-CA', 'CA-CA-CB'
],
'angles-vals': [
1.9146261894377796, 2.0176006153054447, 2.181661564992912, 1.8971728969178363,
1.9477874452256716, 2.3177972466484698, 2.0943951023931953,
2.0943951023931953, 2.0943951023931953, 2.0943951023931953
],
'atom-names': [
'CB', 'CG', 'CD1', 'NE1', 'CE2', 'CZ2', 'CH2', 'CZ3', 'CE3', 'CD2'
],
'bonds-names': [
'CA-CB', 'CB-CG', 'CG-CD1', 'CD1-NE1', 'NE1-CE2', 'CE2-CZ2', 'CZ2-CH2',
'CH2-CZ3', 'CZ3-CE3', 'CE3-CD2'
],
'bonds-types': [
'CX-CT', 'CT-C*', 'C*-CW', 'CW-NA', 'NA-CN', 'CN-CA', 'CA-CA', 'CA-CA',
'CA-CA', 'CA-CB'
],
'bonds-vals': [1.526, 1.495, 1.352, 1.381, 1.38, 1.4, 1.4, 1.4, 1.4, 1.404],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD1', 'CB-CG-CD1-NE1', 'CG-CD1-NE1-CE2',
'CD1-NE1-CE2-CZ2', 'NE1-CE2-CZ2-CH2', 'CE2-CZ2-CH2-CZ3', 'CZ2-CH2-CZ3-CE3',
'CH2-CZ3-CE3-CD2'
],
'torsion-types': [
'C -N -CX-CT', 'N -CX-CT-C*', 'CX-CT-C*-CW', 'CT-C*-CW-NA', 'C*-CW-NA-CN',
'CW-NA-CN-CA', 'NA-CN-CA-CA', 'CN-CA-CA-CA', 'CA-CA-CA-CA', 'CA-CA-CA-CB'
],
'torsion-vals': [
'p', 'p', 'p', 3.141592653589793, 0.0, 3.141592653589793, 3.141592653589793,
0.0, 0.0, 0.0
]
},
'Y': {
'angles-names': [
'N-CA-CB', 'CA-CB-CG', 'CB-CG-CD1', 'CG-CD1-CE1', 'CD1-CE1-CZ', 'CE1-CZ-OH',
'CE1-CZ-CE2', 'CZ-CE2-CD2'
],
'angles-types': [
'N -CX-CT', 'CX-CT-CA', 'CT-CA-CA', 'CA-CA-CA', 'CA-CA-C ', 'CA-C -OH',
'CA-C -CA', 'C -CA-CA'
],
'angles-vals': [
1.9146261894377796, 1.9896753472735358, 2.0943951023931953,
2.0943951023931953, 2.0943951023931953, 2.0943951023931953,
2.0943951023931953, 2.0943951023931953
],
'atom-names': ['CB', 'CG', 'CD1', 'CE1', 'CZ', 'OH', 'CE2', 'CD2'],
'bonds-names': [
'CA-CB', 'CB-CG', 'CG-CD1', 'CD1-CE1', 'CE1-CZ', 'CZ-OH', 'CZ-CE2', 'CE2-CD2'
],
'bonds-types': [
'CX-CT', 'CT-CA', 'CA-CA', 'CA-CA', 'CA-C ', 'C -OH', 'C -CA', 'CA-CA'
],
'bonds-vals': [1.526, 1.51, 1.4, 1.4, 1.409, 1.364, 1.409, 1.4],
'torsion-names': [
'C-N-CA-CB', 'N-CA-CB-CG', 'CA-CB-CG-CD1', 'CB-CG-CD1-CE1', 'CG-CD1-CE1-CZ',
'CD1-CE1-CZ-OH', 'CD1-CE1-CZ-CE2', 'CE1-CZ-CE2-CD2'
],
'torsion-types': [
'C -N -CX-CT', 'N -CX-CT-CA', 'CX-CT-CA-CA', 'CT-CA-CA-CA', 'CA-CA-CA-C ',
'CA-CA-C -OH', 'CA-CA-C -CA', 'CA-C -CA-CA'
],
'torsion-vals': [
'p', 'p', 'p', 3.141592653589793, 0.0, 3.141592653589793, 0.0, 0.0
]
},
'V': {
'angles-names': ['N-CA-CB', 'CA-CB-CG1', 'CA-CB-CG2'],
'angles-types': ['N -CX-3C', 'CX-3C-CT', 'CX-3C-CT'],
'angles-vals': [1.9146261894377796, 1.911135530933791, 1.911135530933791],
'atom-names': ['CB', 'CG1', 'CG2'],
'bonds-names': ['CA-CB', 'CB-CG1', 'CB-CG2'],
'bonds-types': ['CX-3C', '3C-CT', '3C-CT'],
'bonds-vals': [1.526, 1.526, 1.526],
'torsion-names': ['C-N-CA-CB', 'N-CA-CB-CG1', 'N-CA-CB-CG2'],
'torsion-types': ['C -N -CX-3C', 'N -CX-3C-CT', 'N -CX-3C-CT'],
'torsion-vals': ['p', 'p', 'p']
},
'_': {
'angles-names': [],
'angles-types': [],
'angles-vals': [],
'atom-names': [],
'bonds-names': [],
'bonds-types': [],
'bonds-vals': [],
'torsion-names': [],
'torsion-types': [],
'torsion-vals': []
}
}
BB_BUILD_INFO = {
"BONDLENS": {
        # the updated values come from crystal data of 1DPE_1_A, validated with other structures
        # the commented values are the original sidechainnet ones
'n-ca': 1.4664931, # 1.442,
'ca-c': 1.524119, # 1.498,
'c-n': 1.3289373, # 1.379,
'c-o': 1.229, # From parm10.dat || huge variability according to structures
# we get 1.3389416 from 1DPE_1_A but also 1.2289 from 2F2H_d2f2hf1
'c-oh': 1.364
}, # From parm10.dat, for OXT
# For placing oxygens
"BONDANGS": {
'ca-c-o': 2.0944, # Approximated to be 2pi / 3; parm10.dat says 2.0350539
'ca-c-oh': 2.0944
}, # Equal to 'ca-c-o', for OXT
"BONDTORSIONS": {
'n-ca-c-n': -0.785398163
} # A simple approximation, not meant to be exact.
}
#################
##### DOERS #####
#################
def make_cloud_mask(aa):
""" relevent points will be 1. paddings will be 0. """
mask = np.zeros(14)
if aa != "_":
n_atoms = 4+len( SC_BUILD_INFO[aa]["atom-names"] )
mask[:n_atoms] = 1
return mask
def make_bond_mask(aa):
""" Gives the length of the bond originating each atom. """
mask = np.zeros(14)
# backbone
mask[0] = BB_BUILD_INFO["BONDLENS"]['c-n']
mask[1] = BB_BUILD_INFO["BONDLENS"]['n-ca']
mask[2] = BB_BUILD_INFO["BONDLENS"]['ca-c']
mask[3] = BB_BUILD_INFO["BONDLENS"]['c-o']
# sidechain - except padding token
if aa in SC_BUILD_INFO.keys():
for i,bond in enumerate(SC_BUILD_INFO[aa]['bonds-vals']):
mask[4+i] = bond
return mask
def make_theta_mask(aa):
""" Gives the theta of the bond originating each atom. """
mask = np.zeros(14)
# backbone
#
# sidechain
for i,theta in enumerate(SC_BUILD_INFO[aa]['angles-vals']):
mask[4+i] = theta
return mask
def make_torsion_mask(aa):
""" Gives the dihedral of the bond originating each atom. """
mask = np.zeros(14)
# backbone
#
# sidechain
for i, torsion in enumerate(SC_BUILD_INFO[aa]['torsion-vals']):
if torsion == 'p':
mask[4+i] = np.nan
elif torsion == "i":
# https://github.com/jonathanking/sidechainnet/blob/master/sidechainnet/structure/StructureBuilder.py#L372
            mask[4+i] = 999 # annotate to change later # mask[4+i-1] - np.pi
else:
mask[4+i] = torsion
return mask
def make_idx_mask(aa):
""" Gives the idxs of the 3 previous points. """
mask = np.zeros((11, 3))
# backbone
mask[0, :] = np.arange(3)
# sidechain
mapper = {"N": 0, "CA": 1, "C":2, "CB": 4}
for i, torsion in enumerate(SC_BUILD_INFO[aa]['torsion-names']):
# get all the atoms forming the dihedral
torsions = [x.rstrip(" ") for x in torsion.split("-")]
# for each atom
for n, torsion in enumerate(torsions[:-1]):
# get the index of the atom in the coords array
loc = mapper[torsion] if torsion in mapper.keys() else 4 + SC_BUILD_INFO[aa]['atom-names'].index(torsion)
# set position to index
mask[i+1][n] = loc
return mask
###################
##### GETTERS #####
###################
SUPREME_INFO = {k: {"cloud_mask": make_cloud_mask(k),
"bond_mask": make_bond_mask(k),
"theta_mask": make_theta_mask(k),
"torsion_mask": make_torsion_mask(k),
"idx_mask": make_idx_mask(k),
}
for k in "ARNDCQEGHILKMFPSTWYV_"}
# @jit()
def scn_cloud_mask(seq, coords=None):
""" Gets the boolean mask atom positions (not all aas have same atoms).
Inputs:
* seqs: (length) iterable of 1-letter aa codes of a protein
* coords: optional .(batch, lc, 3). sidechainnet coords.
returns the true mask (solves potential atoms that might not be provided)
Outputs: (length, 14) boolean mask
"""
if coords is not None:
return (( rearrange(coords, '... (l c) d -> ... l c d', c=14) == 0 ).sum(dim=-1) < coords.shape[-1]).float().cpu()
return torch.tensor([SUPREME_INFO[aa]['cloud_mask'] for aa in seq])
def scn_bond_mask(seq):
""" Inputs:
        * seq: (length). iterable of 1-letter aa codes of a protein
Outputs: (L, 14) maps point to bond length
"""
return torch.tensor([SUPREME_INFO[aa]['bond_mask'] for aa in seq])
def scn_angle_mask(seq, angles):
""" Inputs:
* seq: (length). iterable of 1-letter aa codes of a protein
* angles: (length, 12). [phi, psi, omega, b_angle(n_ca_c), b_angle(ca_c_n), b_angle(c_n_ca), 6_scn_torsions]
        Outputs: (2, L, 14) maps each point to theta and dihedral.
                 first channel is theta, second is dihedral
"""
    device, precise = angles.device, angles.type()
# get masks
theta_mask = torch.tensor([SUPREME_INFO[aa]['theta_mask'] for aa in seq]).type(precise)
torsion_mask = torch.tensor([SUPREME_INFO[aa]['torsion_mask'] for aa in seq]).type(precise)
# fill masks with angle values
theta_mask[:, 0] = angles[:, 4] # ca_c_n
theta_mask[1:, 1] = angles[:-1, 5] # c_n_ca
theta_mask[:, 2] = angles[:, 3] # n_ca_c
theta_mask[:, 3] = BB_BUILD_INFO["BONDANGS"]["ca-c-o"]
# backbone_torsions
torsion_mask[:, 0] = angles[:, 1] # n determined by psi of previous
torsion_mask[1:, 1] = angles[:-1, 2] # ca determined by omega of previous
torsion_mask[:, 2] = angles[:, 0] # c determined by phi
# O placement - same as in sidechainnet
    # https://github.com/jonathanking/sidechainnet/blob/master/sidechainnet/structure/StructureBuilder.py#L313
torsion_mask[:, 3] = angles[:, 1] - np.pi
torsion_mask[-1, 3] += np.pi
# add torsions to sidechains
to_fill = torsion_mask != torsion_mask # "p" fill with passed values
to_pick = torsion_mask == 999 # "i" infer from previous one
for i in range(len(seq)):
# check if any is nan -> fill the holes
number = to_fill[i].long().sum()
torsion_mask[i, to_fill[i]] = angles[i, 6:6+number]
# pick previous value for inferred torsions
for j, val in enumerate(to_pick[i]):
if val:
torsion_mask[i, j] = torsion_mask[i, j-1] - np.pi # pick values from last one.
return torch.stack([theta_mask, torsion_mask], dim=0).to(device)
def scn_index_mask(seq):
""" Inputs:
* seq: (length). iterable of 1-letter aa codes of a protein
        Outputs: (3, L, 11) maps each sidechain point to the idxs of the
                 3 previous points needed to place it
"""
idxs = torch.tensor([SUPREME_INFO[aa]['idx_mask'] for aa in seq])
return rearrange(idxs, 'l s d -> d l s')
def build_scaffolds_from_scn_angles(seq, angles, coords=None, device="auto"):
""" Builds scaffolds for fast access to data
Inputs:
* seq: string of aas (1 letter code)
* angles: (L, 12) tensor containing the internal angles.
Distributed as follows (following sidechainnet convention):
* (L, 3) for torsion angles
* (L, 3) bond angles
* (L, 6) sidechain angles
* coords: (L, 3) sidechainnet coords. builds the mask with those instead
(better accuracy if modified residues present).
Outputs:
* cloud_mask: (L, 14 ) mask of points that should be converted to coords
* point_ref_mask: (3, L, 11) maps point (except n-ca-c) to idxs of
previous 3 points in the coords array
* angles_mask: (2, L, 14) maps point to theta and dihedral
* bond_mask: (L, 14) gives the length of the bond originating that atom
"""
# auto infer device and precision
precise = angles.type()
if device == "auto":
device = angles.device
if coords is not None:
cloud_mask = scn_cloud_mask(seq, coords=coords)
else:
cloud_mask = scn_cloud_mask(seq)
    cloud_mask = cloud_mask.bool().to(device)
    point_ref_mask = scn_index_mask(seq).long().to(device)
    angles_mask = scn_angle_mask(seq, angles).type(precise).to(device)
    bond_mask = scn_bond_mask(seq).type(precise).to(device)
# return all in a dict
return {"cloud_mask": cloud_mask,
"point_ref_mask": point_ref_mask,
"angles_mask": angles_mask,
"bond_mask": bond_mask }
#############################
####### ENCODERS ############
#############################
def modify_scaffolds_with_coords(scaffolds, coords):
""" Gets scaffolds and fills in the right data.
Inputs:
* scaffolds: dict. as returned by `build_scaffolds_from_scn_angles`
* coords: (L, 14, 3). sidechainnet tensor. same device as scaffolds
Outputs: corrected scaffolds
"""
# calculate distances and update:
# N, CA, C
scaffolds["bond_mask"][1:, 0] = torch.norm(coords[1:, 0] - coords[:-1, 2], dim=-1) # N
scaffolds["bond_mask"][ :, 1] = torch.norm(coords[ :, 1] - coords[: , 0], dim=-1) # CA
scaffolds["bond_mask"][ :, 2] = torch.norm(coords[ :, 2] - coords[: , 1], dim=-1) # C
# O, CB, side chain
selector = np.arange(len(coords))
for i in range(3, 14):
# get indexes
idx_a, idx_b, idx_c = scaffolds["point_ref_mask"][:, :, i-3] # (3, L, 11) -> 3 * (L, 11)
# correct distances
scaffolds["bond_mask"][:, i] = torch.norm(coords[:, i] - coords[selector, idx_c], dim=-1)
# get angles
scaffolds["angles_mask"][0, :, i] = get_angle(coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# handle C-beta, where the C requested is from the previous aa
if i == 4:
# for 1st residue, use position of the second residue's N
first_next_n = coords[1, :1] # 1, 3
# the c requested is from the previous residue
main_c_prev_idxs = coords[selector[:-1], idx_a[1:]]# (L-1), 3
# concat
coords_a = torch.cat([first_next_n, main_c_prev_idxs])
else:
coords_a = coords[selector, idx_a]
# get dihedrals
scaffolds["angles_mask"][1, :, i] = get_dihedral(coords_a,
coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# correct angles and dihedrals for backbone
scaffolds["angles_mask"][0, :-1, 0] = get_angle(coords[:-1, 1], coords[:-1, 2], coords[1: , 0]) # ca_c_n
scaffolds["angles_mask"][0, 1:, 1] = get_angle(coords[:-1, 2], coords[1:, 0], coords[1: , 1]) # c_n_ca
scaffolds["angles_mask"][0, :, 2] = get_angle(coords[:, 0], coords[ :, 1], coords[ : , 2]) # n_ca_c
# N determined by previous psi = f(n, ca, c, n+1)
scaffolds["angles_mask"][1, :-1, 0] = get_dihedral(coords[:-1, 0], coords[:-1, 1], coords[:-1, 2], coords[1:, 0])
# CA determined by omega = f(ca, c, n+1, ca+1)
scaffolds["angles_mask"][1, 1:, 1] = get_dihedral(coords[:-1, 1], coords[:-1, 2], coords[1:, 0], coords[1:, 1])
# C determined by phi = f(c-1, n, ca, c)
scaffolds["angles_mask"][1, 1:, 2] = get_dihedral(coords[:-1, 2], coords[1:, 0], coords[1:, 1], coords[1:, 2])
return scaffolds
if __name__ == "__main__":
print(scn_cloud_mask("AAAA"))
| geometric-vector-perceptron-main | examples/data_handler.py |
| geometric-vector-perceptron-main | examples/__init__.py |
import gc
from argparse import ArgumentParser
from functools import partial
from pathlib import Path
from pprint import pprint
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
from einops import rearrange
from loguru import logger
from pytorch_lightning.callbacks import (
GPUStatsMonitor,
LearningRateMonitor,
ModelCheckpoint,
ProgressBar,
)
from pytorch_lightning.loggers import TensorBoardLogger
from examples.data_handler import kabsch_torch, scn_cloud_mask
from examples.data_utils import (
encode_whole_bonds,
encode_whole_protein,
from_encode_to_pred,
prot_covalent_bond,
)
from examples.scn_data_module import ScnDataModule
from geometric_vector_perceptron.geometric_vector_perceptron import GVP_Network
class StructureModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_parser):
# model
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--depth", type=int, default=4)
parser.add_argument("--cutoffs", type=float, default=1.0)
parser.add_argument("--noise", type=float, default=1.0)
# optimizer & scheduler
parser.add_argument("--init_lr", type=float, default=1e-3)
return parser
def __init__(
self,
depth: int = 1,
cutoffs: float = 1.0,
noise: float = 1.0,
init_lr: float = 1e-3,
**kwargs,
):
super().__init__()
self.save_hyperparameters()
self.needed_info = {
"cutoffs": [cutoffs], # -1e-3 for just covalent, "30_closest", 5. for under 5A, etc
"bond_scales": [1, 2, 4],
"aa_pos_scales": [1, 2, 4, 8, 16, 32, 64, 128],
"atom_pos_scales": [1, 2, 4, 8, 16, 32],
"dist2ca_norm_scales": [1, 2, 4],
"bb_norms_atoms": [0.5], # will encode 3 vectors with this
}
self.model = GVP_Network(
n_layers=depth,
feats_x_in=48,
vectors_x_in=7,
feats_x_out=48,
vectors_x_out=7,
feats_edge_in=8,
vectors_edge_in=1,
feats_edge_out=8,
vectors_edge_out=1,
embedding_nums=[36, 20],
embedding_dims=[16, 16],
edge_embedding_nums=[2],
edge_embedding_dims=[2],
residual=True,
recalc=1
)
self.noise = noise
self.init_lr = init_lr
self.baseline_losses = []
self.epoch_losses = []
def forward(self, seq, true_coords, angles, padding_seq, mask):
needed_info = self.needed_info
device = true_coords.device
needed_info["seq"] = seq[: (-padding_seq) or None]
needed_info["covalent_bond"] = prot_covalent_bond(needed_info["seq"])
pre_target = encode_whole_protein(
seq,
true_coords,
angles,
padding_seq,
needed_info=needed_info,
free_mem=True,
)
pre_target_x, _, _, embedd_info = pre_target
encoded = encode_whole_protein(
seq,
true_coords + self.noise * torch.randn_like(true_coords),
angles,
padding_seq,
needed_info=needed_info,
free_mem=True,
)
x, edge_index, edge_attrs, embedd_info = encoded
batch = torch.tensor([0 for i in range(x.shape[0])], device=x.device).long()
# add position coords
cloud_mask = scn_cloud_mask(seq[: (-padding_seq) or None]).to(device)
# cloud is all points, chain is all for which we have labels
chain_mask = mask[: (-padding_seq) or None].unsqueeze(-1) * cloud_mask
flat_chain_mask = rearrange(chain_mask.bool(), "l c -> (l c)")
cloud_mask = cloud_mask.bool()
flat_cloud_mask = rearrange(cloud_mask, "l c -> (l c)")
recalc_edge = partial(
encode_whole_bonds,
x_format="encode",
embedd_info=embedd_info,
needed_info=needed_info,
free_mem=True,
)
# predict
scores = self.model.forward(
x,
edge_index,
batch=batch,
edge_attr=edge_attrs,
recalc_edge=recalc_edge,
verbose=False,
)
# format pred, baseline and target
target = from_encode_to_pred(
pre_target_x, embedd_info=embedd_info, needed_info=needed_info
)
pred = from_encode_to_pred(
scores, embedd_info=embedd_info, needed_info=needed_info
)
base = from_encode_to_pred(x, embedd_info=embedd_info, needed_info=needed_info)
# MEASURE ERROR
# option 1: loss is MSE on output tokens
# loss_ = (target-pred)**2
# loss = loss_.mean()
# option 2: loss is RMSD on reconstructed coords
target_coords = target[:, 3:4] * target[:, :3]
pred_coords = pred[:, 3:4] * pred[:, :3]
base_coords = base[:, 3:4] * base[:, :3]
        ## align - sometimes the svd (kabsch) fails to converge
try:
pred_aligned, target_aligned = kabsch_torch(pred_coords.t(), target_coords.t()) # (3, N)
base_aligned, _ = kabsch_torch(base_coords.t(), target_coords.t())
loss = ( (pred_aligned.t() - target_aligned.t())[flat_chain_mask[flat_cloud_mask]]**2 ).mean()**0.5
loss_base = ( (base_aligned.t() - target_aligned.t())[flat_chain_mask[flat_cloud_mask]]**2 ).mean()**0.5
        except Exception:
            pred_aligned, target_aligned = None, None
            print("svd failed to converge, falling back to unaligned RMSD")
            loss = ( (pred_coords - target_coords)[flat_chain_mask[flat_cloud_mask]]**2 ).mean()**0.5
            loss_base = ( (base_coords - target_coords)[flat_chain_mask[flat_cloud_mask]]**2 ).mean()**0.5
# free gpu mem
del true_coords, angles, pre_target_x, edge_index, edge_attrs
del scores, target_coords, pred_coords, base_coords
del encoded, pre_target, target_aligned, pred_aligned
gc.collect()
# return loss
return {"loss": loss, "loss_base": loss_base}
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.init_lr)
return optimizer
def on_train_start(self) -> None:
self.baseline_losses = []
self.epoch_losses = []
def training_step(self, batch, batch_idx):
output = self.forward(**batch)
loss = output["loss"]
loss_base = output["loss_base"]
if loss is None or torch.isnan(loss):
return None
self.epoch_losses.append(loss.item())
self.baseline_losses.append(loss_base.item())
self.log("train_loss", loss, on_epoch=True, prog_bar=True)
self.log("train_loss_base", output["loss_base"], on_epoch=True, prog_bar=False)
return loss
def on_train_end(self) -> None:
plt.figure(figsize=(15, 6))
plt.title(
f"Loss Evolution - Denoising of Gaussian-masked Coordinates (mu=0, sigma={self.noise})"
)
plt.plot(self.epoch_losses, label="train loss step")
for window in [8, 16, 32]:
plt.plot(
[
np.mean(self.epoch_losses[:window][0 : i + 1])
for i in range(min(window, len(self.epoch_losses)))
]
+ [
np.mean(self.epoch_losses[i : i + window + 1])
for i in range(len(self.epoch_losses) - window)
],
label="Window mean n={0}".format(window),
)
plt.plot(
np.ones(len(self.epoch_losses)) * np.mean(self.baseline_losses),
"k--",
label="Baseline",
)
plt.xlim(-0.01 * len(self.epoch_losses), 1.01 * len(self.epoch_losses))
plt.ylabel("RMSD")
plt.xlabel("Batch number")
plt.legend()
plt.savefig("loss.pdf")
def validation_step(self, batch, batch_idx):
output = self.forward(**batch)
self.log("val_loss", output["loss"], on_epoch=True, sync_dist=True)
self.log("val_loss_base", output["loss_base"], on_epoch=True, sync_dist=True)
def test_step(self, batch, batch_idx):
output = self.forward(**batch)
self.log("test_loss", output["loss"], on_epoch=True, sync_dist=True)
self.log("test_loss_base", output["loss_base"], on_epoch=True, sync_dist=True)
def get_trainer(args):
pl.seed_everything(args.seed)
# loggers
root_dir = Path(args.default_root_dir).expanduser().resolve()
root_dir.mkdir(parents=True, exist_ok=True)
tb_save_dir = root_dir / "tb"
tb_logger = TensorBoardLogger(save_dir=tb_save_dir)
loggers = [tb_logger]
logger.info(f"Run tensorboard --logdir {tb_save_dir}")
# callbacks
ckpt_cb = ModelCheckpoint(verbose=True)
lr_cb = LearningRateMonitor(logging_interval="step")
pb_cb = ProgressBar(refresh_rate=args.progress_bar_refresh_rate)
callbacks = [lr_cb, pb_cb]
callbacks.append(ckpt_cb)
gpu_cb = GPUStatsMonitor()
callbacks.append(gpu_cb)
plugins = []
trainer = pl.Trainer.from_argparse_args(
args, logger=loggers, callbacks=callbacks, plugins=plugins
)
return trainer
def main(args):
dm = ScnDataModule(**vars(args))
model = StructureModel(**vars(args))
trainer = get_trainer(args)
trainer.fit(model, datamodule=dm)
metrics = trainer.test(model, datamodule=dm)
print("test", metrics)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--seed", type=int, default=23333, help="Seed everything.")
# add model specific args
parser = StructureModel.add_model_specific_args(parser)
# add data specific args
parser = ScnDataModule.add_data_specific_args(parser)
# add trainer args
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
pprint(vars(args))
main(args)
| geometric-vector-perceptron-main | examples/train_lightning.py |
from argparse import ArgumentParser
from typing import List, Optional
from typing import Union
import numpy as np
import pytorch_lightning as pl
import sidechainnet
from sidechainnet.dataloaders.collate import get_collate_fn
from sidechainnet.utils.sequence import ProteinVocabulary
from torch.utils.data import DataLoader, Dataset
class ScnDataset(Dataset):
def __init__(self, dataset, max_len: int):
super(ScnDataset, self).__init__()
self.dataset = dataset
self.max_len = max_len
self.scn_collate_fn = get_collate_fn(False)
self.vocab = ProteinVocabulary()
def collate_fn(self, batch):
batch = self.scn_collate_fn(batch)
real_seqs = [
"".join([self.vocab.int2char(aa) for aa in seq])
for seq in batch.int_seqs.numpy()
]
seq = real_seqs[0][: self.max_len]
true_coords = batch.crds[0].view(-1, 14, 3)[: self.max_len].view(-1, 3)
angles = batch.angs[0, : self.max_len]
mask = batch.msks[0, : self.max_len]
# get padding
padding_seq = (np.array([*seq]) == "_").sum()
return {
"seq": seq,
"true_coords": true_coords,
"angles": angles,
"padding_seq": padding_seq,
"mask": mask,
}
def __getitem__(self, index: int):
return self.dataset[index]
def __len__(self) -> int:
return len(self.dataset)
class ScnDataModule(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--casp_version", type=int, default=7)
parser.add_argument("--scn_dir", type=str, default="./sidechainnet_data")
parser.add_argument("--train_batch_size", type=int, default=1)
parser.add_argument("--eval_batch_size", type=int, default=1)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--train_max_len", type=int, default=256)
parser.add_argument("--eval_max_len", type=int, default=256)
return parser
def __init__(
self,
casp_version: int = 7,
scn_dir: str = "./sidechainnet_data",
train_batch_size: int = 1,
eval_batch_size: int = 1,
num_workers: int = 1,
train_max_len: int = 256,
eval_max_len: int = 256,
**kwargs,
):
super().__init__()
assert train_batch_size == eval_batch_size == 1, "batch size must be 1 for now"
self.casp_version = casp_version
self.scn_dir = scn_dir
self.train_batch_size = train_batch_size
self.eval_batch_size = eval_batch_size
self.num_workers = num_workers
self.train_max_len = train_max_len
self.eval_max_len = eval_max_len
def setup(self, stage: Optional[str] = None):
dataloaders = sidechainnet.load(
casp_version=self.casp_version,
scn_dir=self.scn_dir,
with_pytorch="dataloaders",
)
print(
dataloaders.keys()
) # ['train', 'train_eval', 'valid-10', ..., 'valid-90', 'test']
self.train = ScnDataset(dataloaders["train"].dataset, self.train_max_len)
self.val = ScnDataset(dataloaders["valid-90"].dataset, self.eval_max_len)
self.test = ScnDataset(dataloaders["test"].dataset, self.eval_max_len)
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return DataLoader(
self.train,
batch_size=self.train_batch_size,
shuffle=True,
collate_fn=self.train.collate_fn,
num_workers=self.num_workers,
pin_memory=True,
)
def val_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.val,
batch_size=self.eval_batch_size,
shuffle=False,
collate_fn=self.val.collate_fn,
num_workers=self.num_workers,
pin_memory=True,
)
def test_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.test,
batch_size=self.eval_batch_size,
shuffle=False,
collate_fn=self.test.collate_fn,
num_workers=self.num_workers,
pin_memory=True,
)
if __name__ == "__main__":
dm = ScnDataModule()
dm.setup()
train = dm.train_dataloader()
print("train length", len(train))
valid = dm.val_dataloader()
print("valid length", len(valid))
test = dm.test_dataloader()
print("test length", len(test))
for batch in test:
print(batch)
break
| geometric-vector-perceptron-main | examples/scn_data_module.py |
# Author: Eric Alcaide
import os
import sys
# science
import torch
import torch_sparse
import numpy as np
from einops import repeat, rearrange
# custom utils - from https://github.com/EleutherAI/mp_nerf
from data_handler import *
# new data builders
def get_atom_ids_dict():
""" Get's a dict mapping each atom to a token. """
ids = set(["N", "CA", "C", "O"])
for k,v in SC_BUILD_INFO.items():
for name in v["atom-names"]:
ids.add(name)
return {k: i for i,k in enumerate(sorted(ids))}
#################################
##### ORIGINAL PROJECT DATA #####
#################################
AAS = "ARNDCQEGHILKMFPSTWYV_"
AAS2NUM = {k: AAS.index(k) for k in AAS}
ATOM_IDS = get_atom_ids_dict()
# numbers follow the same order as sidechainnet atoms
GVP_DATA = {
'A': {
'bonds': [[0,1], [1,2], [2,3], [1,4]]
},
'R': {
'bonds': [[0,1], [1,2], [2,3], [2,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [8,10]]
},
'N': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'D': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'C': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5]]
},
'Q': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [6,8]]
},
'E': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8]]
},
'G': {
'bonds': [[0,1], [1,2], [2,3]]
},
'H': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [5,9]]
},
'I': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[4,7]]
},
'L': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[5,7]]
},
'K': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8]]
},
'M': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7]]
},
'F': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [9,10], [5,10]]
},
'P': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[0,6]]
},
'S': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5]]
},
'T': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [4,6]]
},
'W': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [9,10], [10,11], [11,12],
[12, 13], [5,13], [8,13]]
},
'Y': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [5,6],
[6,7], [7,8], [8,9], [8,10], [10,11], [5,11]]
},
'V': {
'bonds': [[0,1], [1,2], [2,3], [1,4], [4,5], [4,6]]
},
'_': {
'bonds': []
}
}
#################################
##### ORIGINAL PROJECT DATA #####
#################################
def graph_laplacian_embedds(edges, eigen_k, center_idx=1, norm=False):
""" Returns the embeddings of points in the K
first eigenvectors of the graph Laplacian.
Inputs:
* edges: (2, N). long tensor or list. undirected edges are enough.
* eigen_k: int. N of first eigenvectors to return embeddings for.
* center_idx: int. index to take as center for the embeddings
* norm: bool. Whether to use the normalized Laplacian. Not recommended.
Output: (n_points, eigen_k)
"""
if isinstance(edges, list):
edges = torch.tensor(edges).long()
# correct dims
if edges.shape[0] != 2:
edges = edges.t()
# early stopping if empty entry
if edges.shape[0] == 0:
return torch.zeros(1, eigen_k)
# get params
size = torch.max(edges)+1
device = edges.device
    # create laplacian
adj_mat = torch.eye(size, device=device)
for i,j in edges.t():
adj_mat[i,j] = adj_mat[j,i] = 1.
deg_mat = torch.eye(size) * adj_mat.sum(dim=-1, keepdim=True)
laplace = deg_mat - adj_mat
# use norm-laplace if arg passed
if norm:
for i,j in edges.t():
laplace[i,j] = laplace[j,i] = -1 / (deg_mat[i,i] * deg_mat[j,j])**0.5
# get laplacian basis - eigendecomposition - order importance by eigenvalue
e, v = torch.symeig(laplace, eigenvectors=True)
idxs = torch.sort( e.abs(), descending=True)[1]
# take embedds and center
embedds = v[:, idxs[:eigen_k]]
embedds = embedds - embedds[center_idx].unsqueeze(-2)
return embedds
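# Quick sketch (editor's addition): for alanine's 5-atom bond graph in GVP_DATA['A'],
# this yields one 3-dim spectral embedding per atom, centered on the CA (index 1):
#   graph_laplacian_embedds(GVP_DATA['A']['bonds'], eigen_k=3)   # shape (5, 3)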
def make_atom_id_embedds(k):
""" Return the tokens for each atom in the aa. """
mask = torch.zeros(14).long()
atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[k]["atom-names"]
for i,atom in enumerate(atom_list):
mask[i] = ATOM_IDS[atom]
return mask
#################################
########## SAVE INFO ############
#################################
SUPREME_INFO = {k: {"cloud_mask": make_cloud_mask(k),
"bond_mask": make_bond_mask(k),
"theta_mask": make_theta_mask(k),
"torsion_mask": make_torsion_mask(k),
"idx_mask": make_idx_mask(k),
#
"eigen_embedd": graph_laplacian_embedds(GVP_DATA[k]["bonds"], eigen_k = 3),
"atom_id_embedd": make_atom_id_embedds(k)
}
for k in "ARNDCQEGHILKMFPSTWYV_"}
#################################
######### RANDOM UTILS ##########
#################################
def encode_dist(x, scales=[1,2,4,8], include_self = True):
""" Encodes a distance with sines and cosines.
Inputs:
* x: (batch, N) or (N,). data to encode.
             Device and type (f16, f32, f64) are inferred from it.
* scales: (s,) or list. lower or higher depending on distances.
Output: (..., num_scales*2 + 1) if include_self or (..., num_scales*2)
"""
x = x.unsqueeze(-1)
# infer device
device, precise = x.device, x.type()
# convert to tensor
if isinstance(scales, list):
scales = torch.tensor([scales], device=device).type(precise)
# get pos encodings
sines = torch.sin(x / scales)
cosines = torch.cos(x / scales)
# concat and return
enc_x = torch.cat([sines, cosines], dim=-1)
return torch.cat([enc_x, x], dim=-1) if include_self else enc_x
def decode_dist(x, scales=[1,2,4,8], include_self = False):
""" Encodes a distance with sines and cosines.
Inputs:
* x: (batch, N, 2*fourier_feats (+1) ) or (N,). data to encode.
Infer devic and type (f16, f32, f64) from here.
* scales: (s,) or list. lower or higher depending on distances.
* include_self: whether to average with raw prediction or not.
Output: (batch, N)
"""
device, precise = x.device, x.type()
# convert to tensor
if isinstance(scales, list):
scales = torch.tensor([scales], device=device).type(precise)
# decode by atan2 and correct negative angles
half = x.shape[-1]//2
decodes = torch.atan2(x[..., :half], x[..., half:2*half])
decodes += (decodes<0).type(precise) * 2*np.pi
# adjust offsets - TODO: handle the case of slightly higher than pi
# in higher scale but slightly lower than 0 in smaller scale
offsets = torch.zeros_like(decodes)
for i in range(decodes.shape[-1]-1, 0, -1):
# lower scale starts at previous+2pi if higher scale is > pi.
offsets[:, i-1] = 2 * ( offsets[:, i] + (decodes[:, i]>np.pi).type(precise) * np.pi )
# correct decodes by offsets
decodes += offsets
# scale up again and take mean
avg_dec = (decodes * scales).mean(dim=-1, keepdim=True)
# average with raw prediction
if include_self:
return 0.5*(avg_dec + x[..., -1:])
return avg_dec
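# Editor's sketch (not part of the original API): round-trip a couple of distances
# through the fourier encoding and its decoder.
def _example_dist_roundtrip():
    d = torch.tensor([3.7, 12.4])
    enc = encode_dist(d, scales=[1, 2, 4, 8], include_self=True)    # (2, 9)
    dec = decode_dist(enc, scales=[1, 2, 4, 8], include_self=True)  # (2, 1), ~[[3.7], [12.4]]
    return d, dec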
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
""" Calculates the n-th degree adjacency matrix.
Performs mm of adj_mat and adds the newly added.
Default is dense. Mods for sparse version are done when needed.
Inputs:
* adj_mat: (N, N) adjacency tensor
* n: int. degree of the output adjacency
* sparse: bool. whether to use torch-sparse module
Outputs:
* edge_idxs: the ij positions of the adjacency matrix
* edge_attrs: the degree of connectivity (1 for neighs, 2 for neighs^2 )
"""
adj_mat = adj_mat.float()
attr_mat = torch.zeros_like(adj_mat)
for i in range(n):
if i == 0:
attr_mat += adj_mat
continue
if i == 1 and sparse:
# create sparse adj tensor
adj_mat = torch.sparse.FloatTensor( adj_mat.nonzero().t(),
adj_mat[ adj_mat!=0 ] ).to(adj_mat.device).coalesce()
idxs, vals = adj_mat.indices(), adj_mat.values()
m, k, n = 3 * [adj_mat.shape[0]] # (m, n) * (n, k) , but adj_mats are squared: m=n=k
if sparse:
idxs, vals = torch_sparse.spspmm(idxs, vals, idxs, vals, m=m, k=k, n=n)
adj_mat = torch.zeros_like(attr_mat)
adj_mat[idxs[0], idxs[1]] = vals.bool().float()
else:
adj_mat = (adj_mat @ adj_mat).bool().float()
attr_mat[ (adj_mat - attr_mat.bool().float()).bool() ] += i+1
return adj_mat, attr_mat
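# Quick sketch (editor's addition): for a 4-node path graph 0-1-2-3 with n=2, direct
# bonds keep attribute 1 while newly reachable second-degree pairs such as (0,2) and
# (1,3) get attribute 2:
#   A = torch.zeros(4, 4)
#   for i, j in [(0, 1), (1, 2), (2, 3)]:
#       A[i, j] = A[j, i] = 1.
#   adj2, attr2 = nth_deg_adjacency(A, n=2, sparse=False)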
def prot_covalent_bond(seq, adj_degree=1, cloud_mask=None):
""" Returns the idxs of covalent bonds for a protein.
        Inputs:
        * seq: str. Protein sequence in 1-letter AA code.
        * adj_degree: int. degree of adjacency to consider (1 = only direct bonds).
        * cloud_mask: mask selecting the present atoms.
        Outputs: edge_idxs, edge_attrs
"""
# create or infer cloud_mask
if cloud_mask is None:
cloud_mask = scn_cloud_mask(seq).bool()
device, precise = cloud_mask.device, cloud_mask.type()
# get starting poses for every aa
scaff = torch.zeros_like(cloud_mask)
scaff[:, 0] = 1
idxs = scaff[cloud_mask].nonzero().view(-1)
# get poses + idxs from the dict with GVP_DATA - return all edges
adj_mat = torch.zeros(idxs.amax()+14, idxs.amax()+14)
for i,idx in enumerate(idxs):
# bond with next aa
extra = []
if i < idxs.shape[0]-1:
extra = [[2, (idxs[i+1]-idx).item()]]
bonds = idx + torch.tensor( GVP_DATA[seq[i]]['bonds'] + extra ).long().t()
adj_mat[bonds[0], bonds[1]] = 1.
# convert to undirected
adj_mat = adj_mat + adj_mat.t()
# do N_th degree adjacency
adj_mat, attr_mat = nth_deg_adjacency(adj_mat, n=adj_degree, sparse=True)
edge_idxs = attr_mat.nonzero().t().long()
edge_attrs = attr_mat[edge_idxs[0], edge_idxs[1]]
return edge_idxs, edge_attrs
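# Minimal usage sketch (editor's addition): covalent bond graph of a Gly-Ala dipeptide.
#   edge_idxs, edge_attrs = prot_covalent_bond("GA", adj_degree=1)
#   edge_idxs.shape   # (2, n_edges), each undirected bond stored in both directions
#   edge_attrs        # all 1. for adj_degree=1 (degree of connectivity)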
def dist2ca(x, mask=None, eps=1e-7):
""" Calculates distance from each point to C-alfa.
Inputs:
* x: (L, 14, D)
* mask: boolean mask of (L, 14)
Returns unit vectors and norm.
"""
x = x - x[:, 1].unsqueeze(1)
norm = torch.norm(x, dim=-1, keepdim=True)
x_norm = x / (norm+eps)
if mask:
return x_norm[mask], norm[mask]
return x_norm, norm
def orient_aa(x, mask=None, eps=1e-7):
""" Calculates unit vectors and norms of features for backbone.
Inputs:
        * x: (L, 14, D). Coordinates in sidechainnet format.
Returns unit vectors (5) and norms (3).
"""
# get tensor info
device, precise = x.device, x.type()
    vec_wrap = torch.zeros(5, x.shape[0], 3, device=device) # (feats, L, 3)
norm_wrap = torch.zeros(3, x.shape[0], device=device)
# first feat is CB-CA
vec_wrap[0] = x[:, 4] - x[:, 1]
norm_wrap[0] = torch.norm(vec_wrap[0], dim=-1)
vec_wrap[0] /= norm_wrap[0].unsqueeze(dim=-1) + eps
    # second is CA - CA+ (vector towards the next CA):
vec_wrap[1, :-1] = x[:-1, 1] - x[1:, 1]
norm_wrap[1, :-1] = torch.norm(vec_wrap[1, :-1], dim=-1)
vec_wrap[1, :-1] /= norm_wrap[1, :-1].unsqueeze(dim=-1) + eps
# same but reverse vectors
vec_wrap[2] = (-1)*vec_wrap[1]
    # third is CA- - CA (vector towards the previous CA)
vec_wrap[3, 1:] = x[:-1, 1] - x[1:, 1]
norm_wrap[2, 1:] = torch.norm(vec_wrap[3, 1:], dim=-1)
vec_wrap[3, 1:] /= norm_wrap[2, 1:].unsqueeze(dim=-1) + eps
# now vectors in reverse order
vec_wrap[4] = (-1)*vec_wrap[3]
return vec_wrap, norm_wrap
def chain2atoms(x, mask=None):
""" Expand from (L, other) to (L, C, other). """
device, precise = x.device, x.type()
# get mask
wrap = torch.ones(x.shape[0], 14, *x.shape[1:]).type(precise).to(device)
# assign
wrap = wrap * x.unsqueeze(1)
if mask is not None:
return wrap[mask]
return wrap
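# Quick sketch (editor's addition): broadcast one feature vector per residue to all
# 14 atom slots of that residue, e.g. for per-residue positional encodings:
#   per_res = torch.randn(7, 16)        # (L, d)
#   per_atom = chain2atoms(per_res)     # (L, 14, d)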
def from_encode_to_pred(whole_point_enc, use_fourier=False, embedd_info=None, needed_info=None, vec_dim=3):
""" Turns the encoding from the above func into a label / prediction format.
Containing only the essential for position recovery (radial unit vec + norm)
        Inputs:
        * whole_point_enc: (atoms, vector_dims+scalar_dims)
                           Same shape as returned by `encode_whole_protein`.
                           Radial unit vector must be the first vector dims
* embedd_info: dict. contains the number of scalar and vector feats.
"""
vec_dims = vec_dim * embedd_info["point_n_vectors"]
start_pos = 2*len(needed_info["atom_pos_scales"])+vec_dims
if use_fourier:
decoded_dist = decode_dist( whole_point_enc[:, vec_dims:start_pos+1],
scales=needed_info["atom_pos_scales"],
include_self=False)
else:
decoded_dist = whole_point_enc[:, start_pos:start_pos+1]
return torch.cat([# unit radial vector
whole_point_enc[:, :3],
# vector norm
decoded_dist
], dim=-1)
def encode_whole_bonds(x, x_format="coords", embedd_info={},
needed_info = {"cutoffs": [2,5,10],
"bond_scales": [.5, 1, 2],
"adj_degree": 1},
free_mem=False, eps=1e-7):
""" Given some coordinates, and the needed info,
encodes the bonds from point information.
* x: (N, 3) or prediction format
* x_format: one of ["coords" or "prediction"]
* embedd_info: dict. contains the needed embedding info
* needed_info: dict. contains additional needed info
{ cutoffs: list. cutoff distances for bonds.
can be a string for the k closest (ex: "30_closest"),
empty list for just covalent.
bond_scales: list. fourier encodings
adj_degree: int. degree of adj (2 means adj of adj is my adj)
0 for no adjacency
}
* free_mem: whether to delete variables
* eps: constant for numerical stability
"""
device, precise = x.device, x.type()
# convert to 3d coords if passed as preds
if x_format == "encode":
pred_x = from_encode_to_pred(x, embedd_info=embedd_info, needed_info=needed_info)
x = pred_x[:, :3] * pred_x[:, 3:4]
# encode bonds
# 1. BONDS: find the covalent bond_indices - allow arg -> DRY
native = None
if "prot_covalent_bond" in needed_info.keys():
native = True
native_bonds = needed_info["covalent_bond"]
elif needed_info["adj_degree"]:
native = True
native_bonds = prot_covalent_bond(needed_info["seq"], needed_info["adj_degree"])
if native:
native_idxs, native_attrs = native_bonds[0].to(device), native_bonds[1].to(device)
    # determine kind of cutoff (hard distance threshold or closest points)
closest = None
if len(needed_info["cutoffs"]) > 0:
cutoffs = needed_info["cutoffs"].copy()
if sum( isinstance(ci, str) for ci in cutoffs ) > 0:
cutoffs = [-1e-3] # negative so no bond is taken
closest = int( needed_info["cutoffs"][0].split("_")[0] )
# points under cutoff = d(i - j) < X
cutoffs = torch.tensor(cutoffs, device=device).type(precise)
dist_mat = torch.cdist(x, x, p=2)
# normal buckets
bond_buckets = torch.zeros(*x.shape[:-1], x.shape[-2], device=device).type(precise)
if len(needed_info["cutoffs"]) > 0 and not closest:
# count from latest degree of adjacency given
bond_buckets = torch.bucketize(dist_mat, cutoffs)
bond_buckets[native_idxs[0], native_idxs[1]] = cutoffs.shape[0]
# find the indexes - symmetric and we dont want the diag
bond_buckets += cutoffs.shape[0] * torch.eye(bond_buckets.shape[0], device=device).long()
close_bond_idxs = ( bond_buckets < cutoffs.shape[0] ).nonzero().t()
# move away from poses reserved for native
bond_buckets[close_bond_idxs[0], close_bond_idxs[1]] += needed_info["adj_degree"]+1
# the K closest (covalent bonds excluded) are considered bonds
elif closest:
k = closest
# copy dist_mat and mask the covalent bonds out
masked_dist_mat = dist_mat.clone()
masked_dist_mat += torch.eye(masked_dist_mat.shape[0], device=device) * torch.amax(masked_dist_mat)
masked_dist_mat[native_idxs[0], native_idxs[1]] = masked_dist_mat[0,0].clone()
# argsort by distance || *(-1) so min is first
_, sorted_col_idxs = torch.topk(-masked_dist_mat, k=k, dim=-1)
# cat idxs and repeat row idx to match number of column idx
sorted_col_idxs = rearrange(sorted_col_idxs[:, :k], '... n k -> ... (n k)')
sorted_row_idxs = torch.repeat_interleave( torch.arange(dist_mat.shape[0]).long(), repeats=k ).to(device)
close_bond_idxs = torch.stack([ sorted_row_idxs, sorted_col_idxs ], dim=0)
# move away from poses reserved for native
bond_buckets = torch.ones_like(dist_mat) * (needed_info["adj_degree"]+1)
# merge all bonds
if len(needed_info["cutoffs"]) > 0:
if close_bond_idxs.shape[0] > 0:
whole_bond_idxs = torch.cat([native_idxs, close_bond_idxs], dim=-1)
else:
whole_bond_idxs = native_idxs
# 2. ATTRS: encode bond -> attrs
bond_vecs = x[ whole_bond_idxs[0] ] - x[ whole_bond_idxs[1] ]
bond_norms = torch.norm(bond_vecs, dim=-1)
bond_vecs /= (bond_norms + eps).unsqueeze(-1)
bond_norms_enc = encode_dist(bond_norms, scales=needed_info["bond_scales"]).squeeze()
if native:
bond_buckets[native_idxs[0], native_idxs[1]] = native_attrs
bond_attrs = bond_buckets[whole_bond_idxs[0] , whole_bond_idxs[1]]
# pack scalars and vectors - extra token for covalent bonds
bond_n_vectors = 1
bond_n_scalars = (2 * len(needed_info["bond_scales"]) + 1) + 1 # last one is an embedd of size 1+len(cutoffs)
whole_bond_enc = torch.cat([bond_vecs, # 1 vector - no need of reverse - we do 2x bonds (symmetry)
# scalars
bond_norms_enc, # 2 * len(scales)
(bond_attrs-1).unsqueeze(-1) # 1
], dim=-1)
# free gpu mem
if free_mem:
        del bond_buckets, bond_norms_enc, bond_vecs, dist_mat,\
            close_bond_idxs, native_idxs
if closest:
del masked_dist_mat, sorted_col_idxs, sorted_row_idxs
embedd_info = {"bond_n_vectors": bond_n_vectors,
"bond_n_scalars": bond_n_scalars,
"bond_embedding_nums": [ len(needed_info["cutoffs"]) + needed_info["adj_degree"] ]} # extra one for covalent (default)
return whole_bond_idxs, whole_bond_enc, embedd_info
def encode_whole_protein(seq, true_coords, angles, padding_seq,
needed_info = { "cutoffs": [2, 5, 10],
"bond_scales": [0.5, 1, 2]}, free_mem=False):
""" Encodes a whole protein. In points + vectors. """
device, precise = true_coords.device, true_coords.type()
#################
# encode points #
#################
cloud_mask = torch.tensor(scn_cloud_mask(seq[:-padding_seq or None])).bool().to(device)
flat_mask = rearrange(cloud_mask, 'l c -> (l c)')
# embedd everything
# general position embedding
center_coords = true_coords - true_coords.mean(dim=0)
pos_unit_norms = torch.norm(center_coords, dim=-1, keepdim=True)
pos_unit_vecs = center_coords / pos_unit_norms
pos_unit_norms_enc = encode_dist(pos_unit_norms, scales=needed_info["atom_pos_scales"]).squeeze()
# reformat coordinates to scn (L, 14, 3) - TODO: solve if padding=0
coords_wrap = rearrange(center_coords, '(l c) d -> l c d', c=14)[:-padding_seq or None]
# position in backbone embedding
aa_pos = encode_dist( torch.arange(len(seq[:-padding_seq or None]), device=device).float(), scales=needed_info["aa_pos_scales"])
atom_pos = chain2atoms(aa_pos)[cloud_mask]
# atom identity embedding
atom_id_embedds = torch.stack([SUPREME_INFO[k]["atom_id_embedd"] for k in seq[:-padding_seq or None]],
dim=0)[cloud_mask].to(device)
# aa embedding
seq_int = torch.tensor([AAS2NUM[aa] for aa in seq[:-padding_seq or None]], device=device).long()
aa_id_embedds = chain2atoms(seq_int, mask=cloud_mask)
# CA - SC distance
dist2ca_vec, dist2ca_norm = dist2ca(coords_wrap)
dist2ca_norm_enc = encode_dist(dist2ca_norm, scales=needed_info["dist2ca_norm_scales"]).squeeze()
# BACKBONE feats
vecs, norms = orient_aa(coords_wrap)
bb_vecs_atoms = chain2atoms(torch.transpose(vecs, 0, 1), mask=cloud_mask)
bb_norms_atoms = chain2atoms(torch.transpose(norms, 0, 1), mask=cloud_mask)
bb_norms_atoms_enc = encode_dist(bb_norms_atoms, scales=[0.5])
################
# encode bonds #
################
bond_info = encode_whole_bonds(x = coords_wrap[cloud_mask],
x_format = "coords",
embedd_info = {},
needed_info = needed_info )
whole_bond_idxs, whole_bond_enc, bond_embedd_info = bond_info
#########
# merge #
#########
# concat so that final is [vector_dims, scalar_dims]
point_n_vectors = 1 + 1 + 5
point_n_scalars = 2*len(needed_info["atom_pos_scales"]) + 1 +\
2*len(needed_info["aa_pos_scales"]) + 1 +\
2*len(needed_info["dist2ca_norm_scales"]) + 1+\
rearrange(bb_norms_atoms_enc, 'atoms feats encs -> atoms (feats encs)').shape[1] +\
2 # the last 2 are to be embedded yet
whole_point_enc = torch.cat([ pos_unit_vecs[ :-padding_seq*14 or None ][ flat_mask ], # 1
dist2ca_vec[cloud_mask], # 1
rearrange(bb_vecs_atoms, 'atoms n d -> atoms (n d)'), # 5
# scalars
pos_unit_norms_enc[ :-padding_seq*14 or None ][ flat_mask ], # 2n+1
atom_pos, # 2n+1
dist2ca_norm_enc[cloud_mask], # 2n+1
rearrange(bb_norms_atoms_enc, 'atoms feats encs -> atoms (feats encs)'), # 2n+1
atom_id_embedds.unsqueeze(-1),
aa_id_embedds.unsqueeze(-1) ], dim=-1) # the last 2 are yet to be embedded
if free_mem:
del pos_unit_vecs, dist2ca_vec, bb_vecs_atoms, pos_unit_norms_enc, cloud_mask,\
atom_pos, dist2ca_norm_enc, bb_norms_atoms_enc, atom_id_embedds, aa_id_embedds
# record embedding dimensions
point_embedd_info = {"point_n_vectors": point_n_vectors,
"point_n_scalars": point_n_scalars,}
embedd_info = {**point_embedd_info, **bond_embedd_info}
return whole_point_enc, whole_bond_idxs, whole_bond_enc, embedd_info
def get_prot(dataloader_=None, vocab_=None, min_len=80, max_len=150, verbose=True):
""" Gets a protein from sidechainnet and returns
the right attrs for training.
Inputs:
* dataloader_: sidechainnet iterator over dataset
* vocab_: sidechainnet VOCAB class
* min_len: int. minimum sequence length
* max_len: int. maximum sequence length
        * verbose: bool. verbosity level
        Outputs: (seq, coords, angles, padding_seq, mask, pid) for the first
                 protein found within the [min_len, max_len] bounds.
    """
for batch in dataloader_['train']:
# try for breaking from 2 loops at once
try:
for i in range(batch.int_seqs.shape[0]):
# get variables
seq = ''.join([vocab_.int2char(aa) for aa in batch.int_seqs[i].numpy()])
int_seq = batch.int_seqs[i]
angles = batch.angs[i]
mask = batch.msks[i]
# get padding
padding_angles = (torch.abs(angles).sum(dim=-1) == 0).long().sum()
padding_seq = (batch.int_seqs[i] == 20).sum()
# only accept sequences with right dimensions and no missing coords
# # bigger than 0 to avoid errors with negative indexes later
if batch.crds[i].shape[0]//14 == int_seq.shape[0]:
if ( max_len > len(seq) and len(seq) > min_len ) and padding_seq == padding_angles:
if verbose:
print("stopping at sequence of length", len(seq))
# print(len(seq), angles.shape, "paddings: ", padding_seq, padding_angles)
raise StopIteration
else:
# print("found a seq of length:", len(seq),
# "but oustide the threshold:", min_len, max_len)
pass
except StopIteration:
break
return seq, batch.crds[i], angles, padding_seq, batch.msks[i], batch.pids[i]
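# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). The sidechainnet calls
# below (scn.load with pytorch dataloaders, ProteinVocabulary used as `vocab_`)
# and every scale / cutoff value are assumptions for illustration only; adjust
# them to your installed sidechainnet version before running.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import sidechainnet as scn  # assumed dependency of this example
    dataloaders = scn.load(casp_version=7, with_pytorch="dataloaders")
    vocab = scn.utils.sequence.ProteinVocabulary()
    seq, coords, angles, padding_seq, mask, pid = get_prot(
        dataloader_=dataloaders, vocab_=vocab, min_len=80, max_len=150, verbose=True)
    needed_info = {"cutoffs": [2, 5, 10],
                   "bond_scales": [0.5, 1, 2],
                   "adj_degree": 1,
                   "atom_pos_scales": [1, 2, 4, 8, 16, 32],
                   "aa_pos_scales": [1, 2, 4, 8, 16, 32, 64, 128],
                   "dist2ca_norm_scales": [1, 2, 4]}
    point_enc, bond_idxs, bond_enc, embedd_info = encode_whole_protein(
        seq, coords, angles, padding_seq, needed_info=needed_info)
    print(point_enc.shape, bond_idxs.shape, bond_enc.shape, embedd_info)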
| geometric-vector-perceptron-main | examples/data_utils.py |
from setuptools import setup, find_packages
setup(
name = 'x-clip',
packages = find_packages(exclude=[]),
include_package_data = True,
version = '0.12.9',
license='MIT',
description = 'X-CLIP',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/x-clip',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'deep learning',
'contrastive learning',
'CLIP',
],
install_requires=[
'beartype',
'einops>=0.6',
'ftfy',
'regex',
'torch>=1.6',
'torchvision'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| x-clip-main | setup.py |
import copy
import random
from functools import wraps
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms as T
from einops import rearrange
# augmentations
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
def get_default_aug(image_size, channels = 3):
is_rgb = channels == 3
is_greyscale = channels == 1
rgb_or_greyscale = is_rgb or is_greyscale
return torch.nn.Sequential(
RandomApply(
T.ColorJitter(0.8, 0.8, 0.8, 0.2),
p = 0.3
) if rgb_or_greyscale else nn.Identity(),
T.RandomGrayscale(p = 0.2) if is_rgb else nn.Identity(),
T.RandomHorizontalFlip(),
RandomApply(
T.GaussianBlur((3, 3), (1.0, 2.0)),
p = 0.2
),
T.RandomResizedCrop((image_size, image_size)),
T.Normalize(
mean=torch.tensor([0.485, 0.456, 0.406]),
std=torch.tensor([0.229, 0.224, 0.225])
) if is_rgb else nn.Identity(),
)
# helper functions
def default(val, def_val):
    return def_val if val is None else val
def noop(x):
    # identity transform - needed by SimCLR.forward when augment_both is False
    return x
def flatten(t):
return t.reshape(t.shape[0], -1)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
def l2norm(t):
return F.normalize(t, p = 2, dim = -1)
# simclr loss fn
def contrastive_loss(queries, keys, temperature = 0.1):
b, device = queries.shape[0], queries.device
logits = queries @ keys.t()
logits = logits - logits.max(dim=-1, keepdim=True).values
logits /= temperature
return F.cross_entropy(logits, torch.arange(b, device=device))
def nt_xent_loss(queries, keys, temperature = 0.1):
b, device = queries.shape[0], queries.device
n = b * 2
projs = torch.cat((queries, keys))
logits = projs @ projs.t()
mask = torch.eye(n, device=device).bool()
logits = logits[~mask].reshape(n, n - 1)
logits /= temperature
labels = torch.cat(((torch.arange(b, device = device) + b - 1), torch.arange(b, device=device)), dim=0)
loss = F.cross_entropy(logits, labels, reduction = 'sum')
loss /= n
return loss
# loss fn
def loss_fn(x, y):
x = l2norm(x)
y = l2norm(y)
return 2 - 2 * (x * y).sum(dim=-1)
# MLP class for projector and predictor
def MLP(dim, projection_size, hidden_size = None):
hidden_size = default(hidden_size, dim)
return nn.Sequential(
nn.Linear(dim, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace = True),
nn.Linear(hidden_size, projection_size)
)
def SimSiamMLP(dim, projection_size, hidden_size = 4096):
hidden_size = default(hidden_size, projection_size * 2)
return nn.Sequential(
nn.Linear(dim, hidden_size, bias = False),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace = True),
nn.Linear(hidden_size, hidden_size, bias = False),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace = True),
nn.Linear(hidden_size, projection_size, bias = False),
nn.BatchNorm1d(projection_size, affine = False)
)
# a wrapper class for the base neural network
# will manage the interception of the hidden layer output
# and pipe it into the projecter and predictor nets
class NetWrapper(nn.Module):
def __init__(self, net, projection_size, projection_hidden_size = 4096, layer = -2):
super().__init__()
self.net = net
self.layer = layer
self.projector = None
self.projection_size = projection_size
self.projection_hidden_size = projection_hidden_size
self.hidden = {}
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, input, output):
device = input[0].device
self.hidden[device] = flatten(output)
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
@singleton('projector')
def _get_projector(self, hidden):
_, dim = hidden.shape
projector = SimSiamMLP(dim, self.projection_size, self.projection_hidden_size)
return projector.to(hidden)
def get_representation(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
self.hidden.clear()
_ = self.net(x)
hidden = self.hidden[x.device]
self.hidden.clear()
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
def forward(self, x, return_projection = True):
representation = self.get_representation(x)
if not return_projection:
return representation
flattened_representation = rearrange(representation, '... d -> (...) d')
projector = self._get_projector(flattened_representation)
projection = projector(flattened_representation)
return projection, representation
# main class
class SimSiam(nn.Module):
def __init__(
self,
net,
image_size,
channels = 3,
hidden_layer = -2,
projection_size = 256,
projection_hidden_size = 4096,
augment_fn = None,
augment_fn2 = None
):
super().__init__()
self.net = net
# default SimCLR augmentation
self.augment1 = default(augment_fn, get_default_aug(image_size, channels))
self.augment2 = default(augment_fn2, self.augment1)
self.online_encoder = NetWrapper(net, projection_size, projection_hidden_size, layer=hidden_layer)
self.online_predictor = MLP(projection_size, projection_size, projection_hidden_size)
# get device of network and make wrapper same device
device = get_module_device(net)
self.to(device)
# send a mock image tensor to instantiate singleton parameters
self.forward(torch.randn(2, channels, image_size, image_size, device=device))
def forward(self, x):
assert not (self.training and x.shape[0] == 1), 'you must have greater than 1 sample when training, due to the batchnorm in the projection layer'
image_one, image_two = self.augment1(x), self.augment2(x)
online_proj_one, _ = self.online_encoder(image_one)
online_proj_two, _ = self.online_encoder(image_two)
online_pred_one = self.online_predictor(online_proj_one)
online_pred_two = self.online_predictor(online_proj_two)
with torch.no_grad():
target_encoder = self.online_encoder
target_proj_one, _ = target_encoder(image_one)
target_proj_two, _ = target_encoder(image_two)
target_proj_one.detach_()
target_proj_two.detach_()
loss_one = loss_fn(online_pred_one, target_proj_two)
loss_two = loss_fn(online_pred_two, target_proj_one)
loss = loss_one + loss_two
return loss.mean()
# SimCLR
class SimCLR(nn.Module):
def __init__(
self,
net,
image_size,
channels = 3,
hidden_layer = -2,
project_hidden = True,
project_dim = 128,
augment_both = True,
use_nt_xent_loss = False,
augment_fn = None,
temperature = 0.1
):
super().__init__()
self.net = NetWrapper(net, project_dim, layer = hidden_layer)
self.augment = default(augment_fn, get_default_aug(image_size, channels))
self.augment_both = augment_both
self.temperature = temperature
# get device of network and make wrapper same device
device = get_module_device(net)
self.to(device)
# send a mock image tensor to instantiate parameters
self.forward(torch.randn(1, channels, image_size, image_size))
def forward(self, x):
b, c, h, w, device = *x.shape, x.device
transform_fn = self.augment if self.augment_both else noop
queries, _ = self.net(transform_fn(x))
keys, _ = self.net(self.augment(x))
queries, keys = map(flatten, (queries, keys))
loss = nt_xent_loss(queries, keys, temperature = self.temperature)
return loss
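# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wrapping an off-the-shelf
# backbone with SimSiam. The torchvision resnet18 backbone is an assumption for
# illustration; any feature extractor whose penultimate layer emits a flat (or
# flattenable) feature vector works the same way.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from torchvision import models
    backbone = models.resnet18()
    learner = SimSiam(backbone, image_size = 64)  # hidden_layer defaults to -2 (avgpool for resnets)
    images = torch.randn(4, 3, 64, 64)
    loss = learner(images)  # symmetric negative cosine loss with stop-gradient targets
    loss.backward()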
| x-clip-main | x_clip/visual_ssl.py |
import math
import copy
from contextlib import contextmanager
from functools import partial, wraps
import torch
import torch.nn.functional as F
import torch.distributed as distributed
from torch import nn, einsum
from torch.utils.checkpoint import checkpoint
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange, Reduce
from x_clip.mlm import MLM
from x_clip.visual_ssl import SimSiam, SimCLR
from x_clip.distributed import all_gather
# helper functions
def identity(t, *args, **kwargs):
return t
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
@contextmanager
def null_context():
yield
def max_neg_value(dtype):
return -torch.finfo(dtype).max
def cast_tuple(t):
return t if isinstance(t, (tuple, list)) else (t,)
def masked_mean(t, mask, dim = 1, eps = 1e-6):
t = t.masked_fill(~mask, 0.)
numer = t.sum(dim = dim)
denom = mask.sum(dim = dim).clamp(min = eps)
return numer / denom
def pad_dim_to(t, length, dim = 0):
pad_length = length - t.shape[dim]
zero_pairs = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
return F.pad(t, (*((0, 0) * zero_pairs), 0, pad_length))
def log(t, eps = 1e-20):
return torch.log(t + eps)
def l2norm(t):
return F.normalize(t, dim = -1)
def matrix_diag(t):
device = t.device
i, j = t.shape[-2:]
num_diag_el = min(i, j)
i_range = torch.arange(i, device = device)
j_range = torch.arange(j, device = device)
diag_mask = rearrange(i_range, 'i -> i 1') == rearrange(j_range, 'j -> 1 j')
diag_el = t.masked_select(diag_mask)
return rearrange(diag_el, '(b d) -> b d', d = num_diag_el)
# checkpointing helper function
def make_checkpointable(fn):
@wraps(fn)
def inner(*args):
input_needs_grad = any([isinstance(el, torch.Tensor) and el.requires_grad for el in args])
if not input_needs_grad:
return fn(*args)
return checkpoint(fn, *args)
return inner
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(),dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# helper classes
class RearrangeImage(nn.Module):
def forward(self, x):
return rearrange(x, 'b (h w) c -> b c h w', h = int(math.sqrt(x.shape[1])))
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
eps = 1e-5 if x.dtype == torch.float32 else 1e-3
var = torch.var(x, dim = -1, unbiased = False, keepdim = True)
mean = torch.mean(x, dim = -1, keepdim = True)
return (x - mean) * (var + eps).rsqrt() * self.g
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = LayerNorm(dim)
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(self.norm(x), *args, **kwargs)
# patch dropout
class PatchDropout(nn.Module):
def __init__(self, prob):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
def forward(self, x, force_keep_all = False):
if not self.training or self.prob == 0. or force_keep_all:
return x
b, n, _, device = *x.shape, x.device
batch_indices = torch.arange(b, device = device)
batch_indices = rearrange(batch_indices, '... -> ... 1')
num_patches_keep = max(1, int(n * (1 - self.prob)))
patch_indices_keep = torch.randn(b, n, device = device).topk(num_patches_keep, dim = -1).indices
return x[batch_indices, patch_indices_keep]
# rotary positional embedding
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, seq_len, device):
inv_freq = self.inv_freq
t = torch.arange(seq_len, device = device).type_as(inv_freq)
freqs = torch.einsum('i , j -> i j', t, inv_freq)
return torch.cat((freqs, freqs), dim = -1)
def rotate_half(x):
x = rearrange(x, '... (j d) -> ... j d', j = 2)
x1, x2 = x.unbind(dim = -2)
return torch.cat((-x2, x1), dim = -1)
def apply_rotary_pos_emb(freqs, t):
rot_dim = freqs.shape[-1]
t, t_pass = t[..., :rot_dim], t[..., rot_dim:]
t = (t * freqs.cos()) + (rotate_half(t) * freqs.sin())
return torch.cat((t, t_pass), dim = -1)
# transformer
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
inner_dim = int(dim * mult)
self.net = nn.Sequential(
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
LayerNorm(inner_dim),
nn.Dropout(dropout),
nn.Linear(inner_dim, dim, bias = False)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, dim_head = 64, heads = 8, causal = False, dropout = 0.):
super().__init__()
self.heads = heads
self.causal = causal
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(nn.Linear(inner_dim, dim, bias = False), LayerNorm(dim))
self.dropout = nn.Dropout(dropout)
def forward(self, x, mask = None, rotary_pos_emb = None):
h, device, scale = self.heads, x.device, self.scale
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
q = q * self.scale
if exists(rotary_pos_emb):
apply_rotary = partial(apply_rotary_pos_emb, rotary_pos_emb)
q, k, v = map(apply_rotary, (q, k, v))
sim = einsum('b h i d, b h j d -> b h i j', q, k)
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, mask_value)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, mask_value)
attn = sim.softmax(dim = -1, dtype = torch.float32)
attn = attn.type(sim.dtype)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(
self,
dim,
*,
depth,
dim_head = 64,
heads = 8,
causal = False,
attn_dropout = 0.,
ff_dropout = 0.,
ff_mult = 4,
checkpoint_during_training = False
):
super().__init__()
self.checkpoint_during_training = checkpoint_during_training
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim = dim, dim_head = dim_head, heads = heads, causal = causal, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim = dim, mult = ff_mult)),
]))
self.norm_in = LayerNorm(dim)
self.norm_out = LayerNorm(dim)
def forward(
self,
x,
rotary_pos_emb = None,
mask = None
):
can_checkpoint = self.training and self.checkpoint_during_training
checkpoint_fn = make_checkpointable if can_checkpoint else identity
x = self.norm_in(x)
for attn, ff in self.layers:
attn, ff = map(checkpoint_fn, (attn, ff))
x = attn(x, mask, rotary_pos_emb) + x
x = ff(x) + x
return self.norm_out(x)
# text and vision transformers
class TextTransformer(nn.Module):
def __init__(
self,
dim,
*,
num_tokens,
max_seq_len,
dim_head,
rotary_pos_emb = None,
causal = False,
**kwargs
):
super().__init__()
self.token_emb = nn.Embedding(num_tokens, dim)
self.abs_pos_emb = nn.Embedding(max_seq_len, dim) if not rotary_pos_emb else None
self.rotary_pos_emb = RotaryEmbedding(min(dim_head, 32)) if rotary_pos_emb else None
self.cls_token = nn.Parameter(torch.randn(dim)) if not causal else None
self.transformer = Transformer(dim, dim_head = dim_head, causal = causal, **kwargs)
def forward(self, x, mask = None):
b, n, device = *x.shape, x.device
x = self.token_emb(x)
if exists(self.abs_pos_emb):
pos_emb = self.abs_pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> 1 n d')
rotary_pos_emb = None
if exists(self.rotary_pos_emb):
rotary_pos_emb = self.rotary_pos_emb(n + 1, device = device)
if exists(self.cls_token):
cls_tokens = repeat(self.cls_token, 'd -> b 1 d', b = b)
x = torch.cat((cls_tokens, x), dim = 1)
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
out = self.transformer(x, mask = mask, rotary_pos_emb = rotary_pos_emb)
return out
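# Hedged note (not part of the original file): TextTransformer is also usable on its
# own, e.g. TextTransformer(dim = 512, num_tokens = 10000, max_seq_len = 256,
# dim_head = 64, depth = 6, heads = 8) maps token ids of shape (b, n) to embeddings
# of shape (b, n + 1, dim), with the CLS token prepended at position 0 when causal = False.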
class VisionTransformer(nn.Module):
def __init__(
self,
dim,
*,
image_size,
patch_size,
channels,
patch_dropout = 0.5,
**kwargs
):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.to_tokens = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_size, p2 = patch_size),
nn.Linear(patch_dim, dim)
)
self.pos_emb = nn.Embedding(num_patches, dim)
self.patch_dropout = PatchDropout(patch_dropout)
self.transformer = Transformer(dim, **kwargs)
self.to_cls_tokens = nn.Sequential(
Reduce('b n d -> b d', 'mean'),
nn.Linear(dim, dim, bias = False),
Rearrange('b d -> b 1 d')
)
def forward(
self,
x,
keep_all_patches = False
):
device = x.device
x = self.to_tokens(x)
b, n, _ = x.shape
pos_emb = self.pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> 1 n d')
x = self.patch_dropout(x, force_keep_all = keep_all_patches)
out = self.transformer(x)
cls_tokens = self.to_cls_tokens(out)
return torch.cat((cls_tokens, out), dim = 1)
# contrastive learning functions
def model_forward_with_context(
*,
fn,
args,
freeze,
):
encoding_context = null_context if not freeze else torch.no_grad
with encoding_context():
enc = fn(*args)
if freeze:
enc.detach_()
return enc
# main clip class
class CLIP(nn.Module):
def __init__(
self,
*,
image_encoder = None,
text_encoder = None,
dim_text = 512,
dim_image = 512,
dim_latent = 512,
num_text_tokens = 10000,
text_enc_depth = 6,
text_seq_len = 256,
text_heads = 8,
text_dim_head = 64,
text_has_cls_token = True,
text_pad_id = 0,
text_rotary_pos_emb = False,
text_causal_mask = False,
text_eos_id = None,
text_encode_without_mask = False,
visual_enc_depth = 6,
visual_heads = 8,
visual_dim_head = 64,
visual_image_size = 256,
visual_patch_size = 32,
visual_patch_dropout = 0.5,
visual_has_cls_token = True,
channels = 3,
use_all_token_embeds = False,
downsample_image_embeds = False,
decoupled_contrastive_learning = False,
extra_latent_projection = False,
use_mlm = False,
text_ssl_loss_weight = 0.05,
use_visual_ssl = False,
visual_ssl = None,
visual_ssl_type = 'simsiam',
visual_ssl_hidden_layer = -1,
simclr_temperature = 0.1,
image_ssl_loss_weight = 0.05,
multiview_loss_weight = 0.1,
checkpoint_during_training = False,
**kwargs
):
super().__init__()
assert use_all_token_embeds or (visual_has_cls_token or text_has_cls_token), 'CLS token must be included on both vision and text transformers if you are not using fine-grained contrastive learning loss'
# store some parameters for access
self.dim_text = dim_text
self.dim_image = dim_image
self.dim_latent = dim_latent
self.image_channels = channels
self.image_size = visual_image_size
# instantiate text transformer
self.text_pad_id = text_pad_id
self.text_has_cls_token = text_has_cls_token
self.text_seq_len = text_seq_len
self.text_encode_without_mask = text_encode_without_mask # whether to pass in text mask to text encoder
self.text_causal_mask = text_causal_mask
self.text_eos_id = text_eos_id
assert not (text_causal_mask and not exists(text_eos_id)), 'text EOS token id must be given if using causal mask in text transformer'
if exists(text_encoder):
self.text_transformer = text_encoder
else:
self.text_transformer = TextTransformer(
dim = dim_text,
num_tokens = num_text_tokens + (1 if use_mlm else 0),
max_seq_len = text_seq_len,
depth = text_enc_depth,
heads = text_heads,
causal = text_causal_mask,
dim_head = text_dim_head,
rotary_pos_emb = text_rotary_pos_emb,
checkpoint_during_training = checkpoint_during_training
)
# instantiate image transformer
self.visual_has_cls_token = visual_has_cls_token
if exists(image_encoder):
self.visual_transformer = image_encoder
else:
self.visual_transformer = VisionTransformer(
dim = dim_image,
image_size = visual_image_size,
patch_size = visual_patch_size,
channels = channels,
depth = visual_enc_depth,
heads = visual_heads,
dim_head = visual_dim_head,
patch_dropout = visual_patch_dropout,
checkpoint_during_training = checkpoint_during_training
)
# text ssl
self.use_mlm = use_mlm
self.text_ssl_loss_weight = text_ssl_loss_weight if use_mlm else 0
if use_mlm:
mlm_kwargs, kwargs = groupby_prefix_and_trim('mlm_', kwargs)
self.mlm = MLM(
self.text_transformer,
dim = dim_text,
num_tokens = num_text_tokens,
**mlm_kwargs
)
# image ssl
self.use_visual_ssl = use_visual_ssl or exists(visual_ssl)
self.image_ssl_loss_weight = image_ssl_loss_weight if use_visual_ssl else 0
if self.use_visual_ssl:
if exists(visual_ssl):
self.visual_ssl = visual_ssl
elif use_visual_ssl:
if visual_ssl_type == 'simsiam':
ssl_type = partial(SimSiam, channels = channels)
elif visual_ssl_type == 'simclr':
ssl_type = partial(SimCLR, temperature = simclr_temperature, channels = channels)
else:
                    raise ValueError(f'unknown visual_ssl_type {visual_ssl_type}')
self.visual_ssl = ssl_type(
self.visual_transformer,
image_size = visual_image_size,
hidden_layer = visual_ssl_hidden_layer
)
# text latent projection
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
# image latent projection
if downsample_image_embeds:
            assert use_all_token_embeds, 'must be using all token embeds for contrastive learning in order to downsample the image embeddings'
self.to_visual_latent = nn.Sequential(
RearrangeImage(),
nn.Conv2d(dim_image, dim_image, 4, stride = 2, padding = 1, bias = False, groups = dim_image),
nn.Conv2d(dim_image, dim_latent, 1),
Rearrange('b c h w -> b (h w) c')
)
else:
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
# temperature
self.temperature = nn.Parameter(torch.tensor(1.))
# from https://arxiv.org/abs/2111.07783 (FILIP paper)
self.use_all_token_embeds = use_all_token_embeds
# proposed in https://arxiv.org/abs/2110.06848 (DCL) and https://arxiv.org/abs/2110.11316 (CLOOB)
self.decoupled_contrastive_learning = decoupled_contrastive_learning
# proposed in https://arxiv.org/abs/2110.11316 (CLOOB)
self.extra_latent_projection = extra_latent_projection
self.to_text_latent_extra = copy.deepcopy(self.to_text_latent)
self.to_visual_latent_extra = copy.deepcopy(self.to_visual_latent)
self.multiview_loss_weight = multiview_loss_weight
# is distributed or not
self.requires_all_gather = distributed.is_initialized() and distributed.get_world_size() > 1
def forward(
self,
text,
image,
return_loss = False,
return_encodings = False,
return_latents = False,
freeze_image_encoder = False, # image encoder is not trained if this is set to True, proposed by LiT paper
freeze_text_encoder = False, # text encoder is not trained if this is set to True
text_to_image = True, # in the case the extra projection is turned on, would return different similarity values depending on modality directionality
aug_text = None, # augmented text (for multiview)
aug_image = None # augmented image (for multiview)
):
b, device = text.shape[0], text.device
# derive text mask
text_mask = text != self.text_pad_id
# ssl
text_ssl_loss = 0
image_ssl_loss = 0
if return_loss:
text_ssl_loss = self.mlm(text, mask = text_mask) if self.use_mlm else 0
image_ssl_loss = self.visual_ssl(image) if self.use_visual_ssl else 0
# concat augmented texts and images and do some asserts
num_batch_texts = num_batch_images = 1
if exists(aug_text):
aug_text = cast_tuple(aug_text)
assert all(map(lambda t: t.shape == text.shape, aug_text))
num_batch_texts = len(aug_text) + 1
aug_text = torch.cat(aug_text, dim = 0)
aug_text_mask = aug_text != self.text_pad_id
text_mask = torch.cat((text_mask, aug_text_mask), dim = 0)
text = torch.cat((text, aug_text), dim = 0)
if exists(aug_image):
aug_image = cast_tuple(aug_image)
assert all(map(lambda i: i.shape == image.shape, aug_image))
num_batch_images = len(aug_image) + 1
aug_image = torch.cat(aug_image, dim = 0)
image = torch.cat((image, aug_image), dim = 0)
is_multiview = (num_batch_texts > 1 or num_batch_images > 1)
assert not (return_loss and not self.training), 'loss cannot be used if not training'
assert not (not return_loss and is_multiview), 'do not pass in augmented texts or images if not training'
assert not (self.multiview_loss_weight == 0 and is_multiview), 'multiview loss weight cannot be 0 if augmented text or images passed in'
# get encoded text
text_args = (text,)
if not self.text_encode_without_mask:
text_args = (*text_args, text_mask)
enc_text = model_forward_with_context(
fn = self.text_transformer,
args = text_args,
freeze = freeze_text_encoder
)
# depending on whether text is using causal mask, post process, moving eos token to the first position
if self.text_causal_mask:
eos_text_mask = (text == self.text_eos_id)
            assert torch.all(torch.any(eos_text_mask, dim = -1)), f'some of the text rows do not have the eos id {self.text_eos_id}'
text_len = text.shape[-1]
eos_indices = eos_text_mask.float().argmax(dim = -1, keepdim = True)
eos_text_mask = torch.zeros_like(eos_text_mask).scatter(1, eos_indices, 1.).bool()
eos_text_mask = rearrange(eos_text_mask, '... -> ... 1')
eos_tokens = enc_text.masked_select(eos_text_mask)
rest_tokens = enc_text.masked_select(~eos_text_mask)
eos_tokens = rearrange(eos_tokens, '(b d) -> b 1 d', b = b)
rest_tokens = rearrange(rest_tokens, '(b n d) -> b n d', b = b, n = text_len - 1)
enc_text = torch.cat((eos_tokens, rest_tokens), dim = 1)
# whether to train image encoder, in the case that the image net was pretrained as recommended in LiT
enc_image = model_forward_with_context(
fn = self.visual_transformer,
args = (image,),
freeze = freeze_image_encoder
)
# early return of encodings, if needed (for DALL-E2)
if return_encodings:
return enc_text, enc_image
# depending on whether to do fine-grained CLIP or not, select either all tokens, or CLS tokens only
if self.use_all_token_embeds:
assert enc_text.ndim == 3, 'encoded text must have 3 dimensions (batch, seq, features)'
assert enc_image.ndim == 3, 'encoded image must have 3 dimensions (batch, seq [height x width], features)'
text_embeds = enc_text[:, 1:] if self.text_has_cls_token else enc_text
image_embeds = enc_image[:, 1:] if self.visual_has_cls_token else enc_image
else:
text_embeds = enc_text[:, 0] if enc_text.ndim == 3 else enc_text
image_embeds = enc_image[:, 0] if enc_image.ndim == 3 else enc_image
# project to latents
text_latents = self.to_text_latent(text_embeds)
image_latents = self.to_visual_latent(image_embeds)
text_latents, image_latents = map(l2norm, (text_latents, image_latents))
# calculate another set of latents for image to text (vs text to image)
# proposed by CLOOB
text_latents_extra, image_latents_extra = text_latents, image_latents
if self.extra_latent_projection:
text_latents_extra = self.to_text_latent_extra(text_embeds)
image_latents_extra = self.to_visual_latent_extra(image_embeds)
text_latents_extra, image_latents_extra = map(l2norm, (text_latents_extra, image_latents_extra))
# whether to early return latents
if return_latents:
if self.extra_latent_projection:
return text_latents, image_latents, text_latents_extra, image_latents_extra
return text_latents, image_latents
# get temperature
temp = self.temperature.exp()
# early return, if needed
if not return_loss and self.use_all_token_embeds:
einsum_args = (text_latents_extra, image_latents_extra) if self.extra_latent_projection and not text_to_image else (text_latents, image_latents)
return einsum('b t d, b i d -> b t i', *einsum_args) * temp
if not return_loss and not self.use_all_token_embeds:
einsum_args = (text_latents_extra, image_latents_extra) if self.extra_latent_projection and not text_to_image else (text_latents, image_latents)
return einsum('b d, b d -> b', *einsum_args) * temp
# split out multiview dimension for text and images
text_latents = rearrange(text_latents, '(m b) ... -> m b ...', m = num_batch_texts)
image_latents = rearrange(image_latents, '(m b) ... -> m b ...', m = num_batch_images)
if self.extra_latent_projection:
text_latents_extra = rearrange(text_latents_extra, '(m b) ... -> m b ...', m = num_batch_texts)
image_latents_extra = rearrange(image_latents_extra, '(m b) ... -> m b ...', m = num_batch_images)
# maybe distributed all gather
if self.requires_all_gather:
latents = torch.stack((text_latents, image_latents))
latents, sizes = all_gather(latents, 2, None)
text_latents, image_latents = latents
if self.extra_latent_projection:
latents_extra = torch.stack((text_latents_extra, image_latents_extra))
latents_extra, _ = all_gather(latents_extra, 2, sizes)
text_latents_extra, image_latents_extra = latents_extra
# contrastive loss
"""
m - num batches of text (for multiview)
n - num batches of images (for multiview)
x - batches of text
y - batches of images
t - sequence dimension along text tokens
i - sequence dimension along image tokens
"""
if self.use_all_token_embeds:
# fine-grained CLIP logic
sim_text_to_image = einsum('m x t d, n y i d -> m n x y t i', text_latents, image_latents) * temp
sim_image_to_text = sim_text_to_image
if self.extra_latent_projection:
sim_image_to_text = einsum('m x t d, n y i d -> m n x y t i', text_latents_extra, image_latents_extra) * temp
text_to_image = reduce(sim_text_to_image, '... t i -> ... t', 'max')
text_to_image_mask = rearrange(text_mask, '(m b) t -> m 1 b 1 t', m = num_batch_texts)
text_to_image = masked_mean(text_to_image, text_to_image_mask, dim = -1)
image_to_text_mask = rearrange(text_mask, '(m b) t -> m 1 b 1 t 1', m = num_batch_texts)
masked_sim = sim_image_to_text.masked_fill(~image_to_text_mask, max_neg_value(sim_image_to_text.dtype))
image_to_text = reduce(reduce(masked_sim, '... t i -> ... i', 'max'), '... i -> ...', 'mean')
else:
text_to_image = einsum('m t d, n i d -> m n t i', text_latents, image_latents) * temp
image_to_text = rearrange(text_to_image, '... t i -> ... i t')
if self.extra_latent_projection:
image_to_text = einsum('m t d, n i d -> m n i t', text_latents_extra, image_latents_extra) * temp
# calculate loss
text_to_image = rearrange(text_to_image, 'm n ... -> (m n) ...')
image_to_text = rearrange(image_to_text, 'm n ... -> (m n) ...')
# exponentiate
text_to_image_exp, image_to_text_exp = map(torch.exp, (text_to_image, image_to_text))
# numerators
text_to_image_pos, image_to_text_pos = map(matrix_diag, (text_to_image_exp, image_to_text_exp))
# denominator
if self.decoupled_contrastive_learning:
pos_mask = torch.eye(b, device = device, dtype = torch.bool)
text_to_image_exp, image_to_text_exp = map(lambda t: t.masked_fill(pos_mask, 0.), (text_to_image_exp, image_to_text_exp))
text_to_image_denom, image_to_text_denom = map(lambda t: t.sum(dim = -1), (text_to_image_exp, image_to_text_exp))
# loss
text_to_image_loss = (-log(text_to_image_pos) + log(text_to_image_denom)).mean(dim = -1)
image_to_text_loss = (-log(image_to_text_pos) + log(image_to_text_denom)).mean(dim = -1)
# calculate CL loss
cl_losses = (text_to_image_loss + image_to_text_loss) / 2
# get main CL loss vs multiview CL losses
cl_loss, multiview_cl_loss = cl_losses[0], cl_losses[1:]
# if no augmented text or images passed in, multiview loss weight is 0
multiview_loss_weight = self.multiview_loss_weight if is_multiview else 0
# calculate weights
cl_loss_weight = 1 - (self.text_ssl_loss_weight + self.image_ssl_loss_weight + multiview_loss_weight)
loss = (cl_loss * cl_loss_weight) \
+ (text_ssl_loss * self.text_ssl_loss_weight) \
+ (image_ssl_loss * self.image_ssl_loss_weight)
# add multiview CL loss with weight
if is_multiview:
loss = loss + multiview_cl_loss.mean() * multiview_loss_weight
return loss
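# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a deliberately small CLIP
# trained on random data, relying only on the defaults above. All dimensions are
# illustrative, not recommended settings.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    clip = CLIP(
        dim_text = 256,
        dim_image = 256,
        dim_latent = 256,
        num_text_tokens = 10000,
        text_enc_depth = 2,
        text_seq_len = 128,
        text_heads = 4,
        visual_enc_depth = 2,
        visual_heads = 4,
        visual_image_size = 64,
        visual_patch_size = 16
    )
    text = torch.randint(0, 10000, (4, 128))   # 0 is the default pad id and is masked out
    images = torch.randn(4, 3, 64, 64)
    loss = clip(text, images, return_loss = True)
    loss.backward()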
| x-clip-main | x_clip/x_clip.py |
import math
from functools import reduce
import torch
from torch import nn
import torch.nn.functional as F
# helpers
def prob_mask_like(t, prob):
return torch.zeros_like(t).float().uniform_(0, 1) < prob
def mask_with_tokens(t, token_ids):
init_no_mask = torch.full_like(t, False, dtype=torch.bool)
mask = reduce(lambda acc, el: acc | (t == el), token_ids, init_no_mask)
return mask
def get_mask_subset_with_prob(mask, prob):
batch, seq_len, device = *mask.shape, mask.device
max_masked = math.ceil(prob * seq_len)
num_tokens = mask.sum(dim=-1, keepdim=True)
mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
mask_excess = mask_excess[:, :max_masked]
rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
_, sampled_indices = rand.topk(max_masked, dim=-1)
sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
new_mask = torch.zeros((batch, seq_len + 1), device=device)
new_mask.scatter_(-1, sampled_indices, 1)
return new_mask[:, 1:].bool()
# main class
class MLM(nn.Module):
def __init__(
self,
transformer,
*,
dim,
num_tokens,
mask_prob = 0.15,
replace_prob = 0.9,
random_token_prob = 0.,
mask_token_id = 2,
pad_token_id = 0,
mask_ignore_token_ids = []):
super().__init__()
self.transformer = transformer
# mlm related probabilities
self.mask_prob = mask_prob
self.replace_prob = replace_prob
self.num_tokens = num_tokens
self.random_token_prob = random_token_prob
# token ids
self.pad_token_id = pad_token_id
self.mask_token_id = mask_token_id
self.mask_ignore_token_ids = set([*mask_ignore_token_ids, pad_token_id])
# to text logits
self.to_logits = nn.Linear(dim, num_tokens)
def forward(self, seq, **kwargs):
# do not mask [pad] tokens, or any other tokens in the tokens designated to be excluded ([cls], [sep])
# also do not include these special tokens in the tokens chosen at random
no_mask = mask_with_tokens(seq, self.mask_ignore_token_ids)
mask = get_mask_subset_with_prob(~no_mask, self.mask_prob)
# mask out any tokens to padding tokens that were not originally going to be masked
labels = seq.masked_fill(~mask, self.pad_token_id)
# mask seq with mask tokens with probability of `replace_prob` (keep tokens the same with probability 1 - replace_prob)
masked_seq = seq.clone().detach()
# if random token probability > 0 for mlm
if self.random_token_prob > 0:
assert self.num_tokens is not None, 'num_tokens keyword must be supplied when instantiating MLM if using random token replacement'
random_token_prob = prob_mask_like(seq, self.random_token_prob)
random_tokens = torch.randint(0, self.num_tokens, seq.shape, device = seq.device)
random_no_mask = mask_with_tokens(random_tokens, self.mask_ignore_token_ids)
random_token_prob &= ~random_no_mask
masked_seq = torch.where(random_token_prob, random_tokens, masked_seq)
# subtract random token prob mask from mask
mask = mask & ~random_token_prob
# [mask] seq
replace_prob = prob_mask_like(seq, self.replace_prob)
masked_seq = masked_seq.masked_fill(mask * replace_prob, self.mask_token_id)
# get generator output and get mlm loss
embedding = self.transformer(masked_seq, **kwargs)
# project to logits and remove CLS
logits = self.to_logits(embedding)
logits = logits[:, 1:]
mlm_loss = F.cross_entropy(
logits.transpose(1, 2),
labels,
ignore_index = self.pad_token_id
)
return mlm_loss
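# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the MLM head on top of the
# package's own TextTransformer (which prepends a CLS token, matching the
# `logits[:, 1:]` slice above). Hyperparameters are illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from x_clip.x_clip import TextTransformer
    text_encoder = TextTransformer(
        dim = 512, num_tokens = 10000, max_seq_len = 256,
        dim_head = 64, depth = 6, heads = 8)
    mlm = MLM(text_encoder, dim = 512, num_tokens = 10000,
              mask_prob = 0.15, mask_token_id = 2, pad_token_id = 0)
    seq = torch.randint(3, 10000, (4, 256))  # keep ids clear of the pad (0) and mask (2) tokens
    loss = mlm(seq)
    loss.backward()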
| x-clip-main | x_clip/mlm.py |
from x_clip.x_clip import CLIP, TextTransformer
| x-clip-main | x_clip/__init__.py |
# take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import os
import html
from functools import lru_cache
from pathlib import Path
import regex as re
import ftfy
import torch
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Optional, Union, List
from torch.nn.utils.rnn import pad_sequence
# OpenAI simple tokenizer
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path = default_bpe()):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
merges = merges[1:49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + '</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.vocab_size = 49408
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token + '</w>'
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens, remove_start_end = True, pad_tokens = {}):
if torch.is_tensor(tokens):
tokens = tokens.tolist()
if remove_start_end:
            tokens = [token for token in tokens if token not in (49406, 49407, 0)]
text = ''.join([self.decoder[token] for token in tokens if token not in pad_tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
@beartype
def tokenize(
self,
texts: Union[str, List[str]],
context_length = 256,
truncate_text = False,
pad_to_context_length = False
):
if isinstance(texts, str):
texts = [texts]
all_tokens = [self.encode(text) for text in texts]
all_tensors = tuple(torch.tensor(ids) for ids in all_tokens)
tokens = pad_sequence(all_tensors, batch_first = True, padding_value = 0.)
max_length = tokens.shape[-1]
if max_length > context_length:
if truncate_text:
tokens = tokens[:, :context_length]
else:
raise RuntimeError(f"One of the inputs is too long for context length {context_length}")
if pad_to_context_length and max_length < context_length:
tokens = F.pad(tokens, (0, context_length - max_length), value = 0)
return tokens
tokenizer = SimpleTokenizer()
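# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file); it assumes the bundled
# data/bpe_simple_vocab_16e6.txt vocabulary sits next to this module.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    captions = ['a photo of a running dog', 'a photo of a cat']
    token_ids = tokenizer.tokenize(captions, context_length = 256, pad_to_context_length = True)
    print(token_ids.shape)                 # torch.Size([2, 256])
    print(tokenizer.decode(token_ids[0]))  # round-trips back to (roughly) the first caption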
| x-clip-main | x_clip/tokenizer.py |
import torch
from torch.autograd import Function
import torch.distributed as distributed
import torch.nn.functional as F
from einops import rearrange
# helpers (exists / pad_dim_to are used below but were not defined in this module)
def exists(val):
    return val is not None
def pad_dim_to(t, length, dim = 0):
    pad_length = length - t.shape[dim]
    zero_pairs = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
    return F.pad(t, (*((0, 0) * zero_pairs), 0, pad_length))
# distributed helpers
def all_gather_variable_dim(t, dim = 0, sizes = None):
device, rank, world_size = t.device, distributed.get_rank(), distributed.get_world_size()
if not exists(sizes):
size = torch.tensor(t.shape[dim], device = device, dtype = torch.long)
sizes = [torch.empty_like(size, device = device, dtype = torch.long) for i in range(world_size)]
distributed.all_gather(sizes, size)
sizes = torch.stack(sizes)
max_size = sizes.amax().item()
padded_t = pad_dim_to(t, max_size, dim = dim)
gathered_tensors = [torch.empty(padded_t.shape, device = device, dtype = padded_t.dtype) for i in range(world_size)]
distributed.all_gather(gathered_tensors, padded_t)
gathered_tensor = torch.cat(gathered_tensors, dim = dim)
seq = torch.arange(max_size, device = device)
mask = rearrange(seq, 'j -> 1 j') < rearrange(sizes, 'i -> i 1')
mask = rearrange(mask, 'i j -> (i j)')
seq = torch.arange(mask.shape[-1], device = device)
indices = seq[mask]
gathered_tensor = gathered_tensor.index_select(dim, indices)
return gathered_tensor, sizes
class AllGather(Function):
@staticmethod
def forward(ctx, x, dim, sizes):
assert distributed.is_initialized() and distributed.get_world_size() > 1
x, batch_sizes = all_gather_variable_dim(x, dim = dim, sizes = sizes)
ctx.batch_sizes = batch_sizes.tolist()
ctx.dim = dim
return x, batch_sizes
@staticmethod
def backward(ctx, grads, _):
batch_sizes, rank = ctx.batch_sizes, distributed.get_rank()
grads_by_rank = grads.split(batch_sizes, dim = ctx.dim)
return grads_by_rank[rank], None, None
all_gather = AllGather.apply
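# Hedged usage note (not part of the original file): inside an initialized
# torch.distributed run, `gathered, sizes = all_gather(local_latents, 0, None)`
# concatenates variable-sized per-rank batches along dim 0 while keeping the
# autograd graph, so the backward pass above routes gradients only to each
# rank's own slice of the gathered tensor.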
| x-clip-main | x_clip/distributed.py |
#!/usr/bin/env python
"""
Convert all recipes into feedstocks.
This script is to be run in a TravisCI context, with all secret environment
variables defined (STAGING_BINSTAR_TOKEN, GH_TOKEN)
Such as:
export GH_TOKEN=$(cat ~/.conda-smithy/github.token)
"""
from __future__ import print_function
from conda_build.metadata import MetaData
from conda_smithy.utils import get_feedstock_name_from_meta
from contextlib import contextmanager
from datetime import datetime
from github import Github, GithubException
import os.path
import shutil
import subprocess
import sys
import tempfile
import traceback
import time
import requests
from ruamel.yaml import YAML
# Enable DEBUG to run the diagnostics, without actually creating new feedstocks.
DEBUG = False
REPO_SKIP_LIST = ["core", "bot", "staged-recipes", "arm-arch", "systems"]
recipe_directory_name = 'recipes'
def list_recipes():
if os.path.isdir(recipe_directory_name):
recipes = os.listdir(recipe_directory_name)
else:
recipes = []
for recipe_dir in recipes:
# We don't list the "example" feedstock. It is an example, and is there
# to be helpful.
# .DS_Store is created by macOS to store custom attributes of its
# containing folder.
if recipe_dir in ['example', '.DS_Store']:
continue
path = os.path.abspath(os.path.join(recipe_directory_name, recipe_dir))
yield path, get_feedstock_name_from_meta(MetaData(path))
@contextmanager
def tmp_dir(*args, **kwargs):
temp_dir = tempfile.mkdtemp(*args, **kwargs)
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
def repo_exists(gh, organization, name):
# Use the organization provided.
org = gh.get_organization(organization)
try:
org.get_repo(name)
return True
except GithubException as e:
if e.status == 404:
return False
raise
def feedstock_token_exists(organization, name):
r = requests.get(
"https://api.github.com/repos/%s/"
"feedstock-tokens/contents/tokens/%s.json" % (organization, name),
headers={"Authorization": "token %s" % os.environ["GH_TOKEN"]},
)
if r.status_code != 200:
return False
else:
return True
def print_rate_limiting_info(gh, user):
# Compute some info about our GitHub API Rate Limit.
# Note that it doesn't count against our limit to
# get this info. So, we should be doing this regularly
# to better know when it is going to run out. Also,
# this will help us better understand where we are
# spending it and how to better optimize it.
# Get GitHub API Rate Limit usage and total
gh_api_remaining = gh.get_rate_limit().core.remaining
gh_api_total = gh.get_rate_limit().core.limit
# Compute time until GitHub API Rate Limit reset
gh_api_reset_time = gh.get_rate_limit().core.reset
gh_api_reset_time -= datetime.utcnow()
print("")
print("GitHub API Rate Limit Info:")
print("---------------------------")
print("token: ", user)
print("Currently remaining {remaining} out of {total}.".format(
remaining=gh_api_remaining, total=gh_api_total))
print("Will reset in {time}.".format(time=gh_api_reset_time))
print("")
return gh_api_remaining
def sleep_until_reset(gh):
# sleep the job with printing every minute if we are out
# of github api requests
gh_api_remaining = gh.get_rate_limit().core.remaining
if gh_api_remaining == 0:
# Compute time until GitHub API Rate Limit reset
gh_api_reset_time = gh.get_rate_limit().core.reset
gh_api_reset_time -= datetime.utcnow()
mins_to_sleep = int(gh_api_reset_time.total_seconds() / 60)
mins_to_sleep += 2
print("Sleeping until GitHub API resets.")
for i in range(mins_to_sleep):
time.sleep(60)
print("slept for minute {curr} out of {tot}.".format(
curr=i+1, tot=mins_to_sleep))
return True
else:
return False
if __name__ == '__main__':
exit_code = 0
is_merged_pr = os.environ.get('CF_CURRENT_BRANCH') == 'main'
smithy_conf = os.path.expanduser('~/.conda-smithy')
if not os.path.exists(smithy_conf):
os.mkdir(smithy_conf)
def write_token(name, token):
with open(os.path.join(smithy_conf, name + '.token'), 'w') as fh:
fh.write(token)
if 'APPVEYOR_TOKEN' in os.environ:
write_token('appveyor', os.environ['APPVEYOR_TOKEN'])
if 'CIRCLE_TOKEN' in os.environ:
write_token('circle', os.environ['CIRCLE_TOKEN'])
if 'AZURE_TOKEN' in os.environ:
write_token('azure', os.environ['AZURE_TOKEN'])
if 'DRONE_TOKEN' in os.environ:
write_token('drone', os.environ['DRONE_TOKEN'])
if 'TRAVIS_TOKEN' in os.environ:
write_token('travis', os.environ['TRAVIS_TOKEN'])
if 'STAGING_BINSTAR_TOKEN' in os.environ:
write_token('anaconda', os.environ['STAGING_BINSTAR_TOKEN'])
gh_drone = Github(os.environ['GH_DRONE_TOKEN'])
gh_drone_remaining = print_rate_limiting_info(gh_drone, 'GH_DRONE_TOKEN')
gh_travis = Github(os.environ['GH_TRAVIS_TOKEN'])
gh = None
if 'GH_TOKEN' in os.environ:
write_token('github', os.environ['GH_TOKEN'])
gh = Github(os.environ['GH_TOKEN'])
# Get our initial rate limit info.
gh_remaining = print_rate_limiting_info(gh, 'GH_TOKEN')
# if we are out, exit early
# if sleep_until_reset(gh):
# sys.exit(1)
# try the other token maybe?
# if gh_remaining < gh_drone_remaining and gh_remaining < 100:
# write_token('github', os.environ['GH_DRONE_TOKEN'])
# gh = Github(os.environ['GH_DRONE_TOKEN'])
owner_info = ['--organization', 'conda-forge']
print('Calculating the recipes which need to be turned into feedstocks.')
with tmp_dir('__feedstocks') as feedstocks_dir:
feedstock_dirs = []
for recipe_dir, name in list_recipes():
if name.lower() in REPO_SKIP_LIST:
continue
feedstock_dir = os.path.join(feedstocks_dir, name + '-feedstock')
print('Making feedstock for {}'.format(name))
try:
subprocess.check_call(
['conda', 'smithy', 'init', recipe_dir,
'--feedstock-directory', feedstock_dir])
except subprocess.CalledProcessError:
traceback.print_exception(*sys.exc_info())
continue
if not is_merged_pr:
# We just want to check that conda-smithy is doing its
# thing without having any metadata issues.
continue
feedstock_dirs.append([feedstock_dir, name, recipe_dir])
subprocess.check_call([
'git', 'remote', 'add', 'upstream_with_token',
'https://conda-forge-manager:{}@github.com/'
'conda-forge/{}-feedstock'.format(
os.environ['GH_TOKEN'],
name
)
],
cwd=feedstock_dir
)
print_rate_limiting_info(gh_drone, 'GH_DRONE_TOKEN')
# Sometimes we already have the feedstock created. We need to
# deal with that case.
if repo_exists(gh, 'conda-forge', name + '-feedstock'):
subprocess.check_call(
['git', 'fetch', 'upstream_with_token'], cwd=feedstock_dir)
subprocess.check_call(
['git', 'branch', '-m', 'master', 'old'], cwd=feedstock_dir)
try:
subprocess.check_call(
[
'git', 'checkout', '-b', 'master',
'upstream_with_token/master'
],
cwd=feedstock_dir)
except subprocess.CalledProcessError:
# Sometimes, we have a repo, but there are no commits on
# it! Just catch that case.
subprocess.check_call(
                        ['git', 'checkout', '-b', 'master'], cwd=feedstock_dir)
print_rate_limiting_info(gh_drone, 'GH_DRONE_TOKEN')
subprocess.check_call(
['conda', 'smithy', 'register-github', feedstock_dir]
+ owner_info
# hack to help travis work
# + ['--extra-admin-users', gh_travis.get_user().login]
# end of hack
)
print_rate_limiting_info(gh_drone, 'GH_DRONE_TOKEN')
from conda_smithy.ci_register import drone_sync
print("Running drone sync (can take ~100s)")
print_rate_limiting_info(gh_drone, 'GH_DRONE_TOKEN')
drone_sync()
time.sleep(100) # actually wait
print_rate_limiting_info(gh_drone, 'GH_DRONE_TOKEN')
# Break the previous loop to allow the TravisCI registering
# to take place only once per function call.
# Without this, intermittent failures to synch the TravisCI repos ensue.
# Hang on to any CI registration errors that occur and raise them at the end.
for num, (feedstock_dir, name, recipe_dir) in enumerate(feedstock_dirs):
if name.lower() in REPO_SKIP_LIST:
continue
print("\n\nregistering CI services for %s..." % name)
if num >= 10:
exit_code = 0
break
# Try to register each feedstock with CI.
# However sometimes their APIs have issues for whatever reason.
# In order to bank our progress, we note the error and handle it.
# After going through all the recipes and removing the converted ones,
# we fail the build so that people are aware that things did not clear.
# hack to help travis work
# from conda_smithy.ci_register import add_project_to_travis
# add_project_to_travis("conda-forge", name + "-feedstock")
# print_rate_limiting_info(gh_travis, 'GH_TRAVIS_TOKEN')
# end of hack
try:
subprocess.check_call(
['conda', 'smithy', 'register-ci', '--without-appveyor',
'--without-webservice', '--feedstock_directory',
feedstock_dir] + owner_info)
subprocess.check_call(
['conda', 'smithy', 'rerender'], cwd=feedstock_dir)
except subprocess.CalledProcessError:
exit_code = 0
traceback.print_exception(*sys.exc_info())
continue
# slow down so we make sure we are registered
for i in range(1, 13):
time.sleep(10)
print("Waiting for registration: {i} s".format(i=i*10))
# if we get here, now we make the feedstock token and add the staging token
print("making the feedstock token and adding the staging binstar token")
try:
if not feedstock_token_exists("conda-forge", name + "-feedstock"):
subprocess.check_call(
['conda', 'smithy', 'generate-feedstock-token',
'--feedstock_directory', feedstock_dir] + owner_info)
subprocess.check_call(
['conda', 'smithy', 'register-feedstock-token',
'--feedstock_directory', feedstock_dir] + owner_info)
                # add staging token env var to all CI providers except appveyor
# and azure
# azure has it by default and appveyor is not used
subprocess.check_call(
['conda', 'smithy', 'rotate-binstar-token',
'--without-appveyor', '--without-azure',
'--token_name', 'STAGING_BINSTAR_TOKEN'],
cwd=feedstock_dir)
yaml = YAML()
with open(os.path.join(feedstock_dir, "conda-forge.yml"), "r") as fp:
_cfg = yaml.load(fp.read())
_cfg["conda_forge_output_validation"] = True
with open(os.path.join(feedstock_dir, "conda-forge.yml"), "w") as fp:
yaml.dump(_cfg, fp)
subprocess.check_call(
["git", "add", "conda-forge.yml"],
cwd=feedstock_dir
)
subprocess.check_call(
['conda', 'smithy', 'rerender'], cwd=feedstock_dir)
except subprocess.CalledProcessError:
exit_code = 0
traceback.print_exception(*sys.exc_info())
continue
print("making a commit and pushing...")
subprocess.check_call(
['git', 'commit', '--allow-empty', '-am',
"Re-render the feedstock after CI registration."], cwd=feedstock_dir)
for i in range(5):
try:
# Capture the output, as it may contain the GH_TOKEN.
out = subprocess.check_output(
['git', 'push', 'upstream_with_token', 'HEAD:master'],
cwd=feedstock_dir,
stderr=subprocess.STDOUT)
break
except subprocess.CalledProcessError:
pass
# Likely another job has already pushed to this repo.
# Place our changes on top of theirs and try again.
out = subprocess.check_output(
['git', 'fetch', 'upstream_with_token', 'master'],
cwd=feedstock_dir,
stderr=subprocess.STDOUT)
try:
subprocess.check_call(
['git', 'rebase', 'upstream_with_token/master', 'master'],
cwd=feedstock_dir)
except subprocess.CalledProcessError:
# Handle rebase failure by choosing the changes in `master`.
subprocess.check_call(
['git', 'checkout', 'master', '--', '.'],
cwd=feedstock_dir)
subprocess.check_call(
['git', 'rebase', '--continue'], cwd=feedstock_dir)
# Remove this recipe from the repo.
if is_merged_pr:
subprocess.check_call(['git', 'rm', '-rf', recipe_dir])
# hack to help travis work
# from conda_smithy.ci_register import travis_cleanup
# travis_cleanup("conda-forge", name + "-feedstock")
# end of hack
# Update status based on the remote.
subprocess.check_call(['git', 'stash', '--keep-index', '--include-untracked'])
subprocess.check_call(['git', 'fetch'])
# CBURR: Debugging
subprocess.check_call(['git', 'status'])
subprocess.check_call(['git', 'rebase', '--autostash'])
subprocess.check_call(['git', 'add', '.'])
try:
subprocess.check_call(['git', 'stash', 'pop'])
except subprocess.CalledProcessError:
# In case there was nothing to stash.
# Finish quietly.
pass
# Parse `git status --porcelain` to handle some merge conflicts and
# generate the removed recipe list.
changed_files = subprocess.check_output(
['git', 'status', '--porcelain', recipe_directory_name],
universal_newlines=True)
changed_files = changed_files.splitlines()
# Add all files from AU conflicts. They are new files that we
# weren't tracking previously.
# Adding them resolves the conflict and doesn't actually add anything to the index.
new_file_conflicts = filter(lambda _: _.startswith("AU "), changed_files)
new_file_conflicts = map(
lambda _: _.replace("AU", "", 1).lstrip(), new_file_conflicts)
for each_new_file in new_file_conflicts:
subprocess.check_call(['git', 'add', each_new_file])
# Generate a fresh listing of recipes removed.
#
# * Each line we get back is a change to a file in the recipe directory.
# * We narrow the list down to recipes that are staged for deletion
# (ignores examples).
# * Then we clean up the list so that it only has the recipe names.
removed_recipes = filter(lambda _: _.startswith("D "), changed_files)
removed_recipes = map(lambda _: _.replace("D", "", 1).lstrip(), removed_recipes)
removed_recipes = map(
lambda _: os.path.relpath(_, recipe_directory_name), removed_recipes)
removed_recipes = map(lambda _: _.split(os.path.sep)[0], removed_recipes)
removed_recipes = sorted(set(removed_recipes))
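    # Illustrative walk-through of the pipeline above (hypothetical path, assuming
    # recipe_directory_name == 'recipes'): a porcelain line such as
    #   "D  recipes/foo/meta.yaml"
    # becomes "recipes/foo/meta.yaml" after the status letter is stripped, then
    # "foo/meta.yaml" relative to the recipe directory, and finally just the recipe
    # name "foo"; duplicates are dropped by the sorted(set(...)) step.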
# Commit any removed packages.
subprocess.check_call(['git', 'status'])
if removed_recipes:
msg = ('Removed recipe{s} ({}) after converting into feedstock{s}.'
''.format(', '.join(removed_recipes),
s=('s' if len(removed_recipes) > 1 else '')))
msg += ' [ci skip]'
if is_merged_pr:
# Capture the output, as it may contain the GH_TOKEN.
out = subprocess.check_output(
['git', 'remote', 'add', 'upstream_with_token',
'https://x-access-token:{}@github.com/'
'conda-forge/staged-recipes'.format(os.environ['GH_TOKEN'])],
stderr=subprocess.STDOUT)
subprocess.check_call(['git', 'commit', '-m', msg])
# Capture the output, as it may contain the GH_TOKEN.
branch = os.environ.get('CF_CURRENT_BRANCH')
out = subprocess.check_output(
['git', 'push', 'upstream_with_token', 'HEAD:%s' % branch],
stderr=subprocess.STDOUT)
else:
print('Would git commit, with the following message: \n {}'.format(msg))
if gh:
# Get our final rate limit info.
print_rate_limiting_info(gh, 'GH_TOKEN')
if gh_drone:
print_rate_limiting_info(gh_drone, 'GH_DRONE_TOKEN')
if gh_travis:
print_rate_limiting_info(gh_travis, 'GH_TRAVIS_TOKEN')
sys.exit(exit_code)
| staged-recipes-main | .travis_scripts/create_feedstocks.py |
#!/usr/bin/env python
"""
Copyright (c) 2016, Continuum Analytics, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Continuum Analytics nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function, division
import logging
import os
import pkg_resources
import re
import subprocess
import networkx as nx
from conda_build import api, conda_interface
from conda_build.metadata import find_recipe, MetaData
from conda_build.utils import HashableDict
log = logging.getLogger(__file__)
CONDA_BUILD_CACHE = os.environ.get("CONDA_BUILD_CACHE")
hash_length = api.Config().hash_length
def package_key(metadata, worker_label, run='build'):
# get the build string from whatever conda-build makes of the configuration
used_loop_vars = metadata.get_used_loop_vars()
build_vars = '-'.join([k + '_' + str(metadata.config.variant[k]) for k in used_loop_vars
if k != 'target_platform'])
# kind of a special case. Target platform determines a lot of output behavior, but may not be
# explicitly listed in the recipe.
tp = metadata.config.variant.get('target_platform')
if tp and tp != metadata.config.subdir and 'target_platform' not in build_vars:
build_vars += '-target_' + tp
key = [metadata.name(), metadata.version()]
if build_vars:
key.append(build_vars)
key.extend(['on', worker_label])
key = "-".join(key)
if run == 'test':
key = '-'.join(('c3itest', key))
return key
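# Illustrative key (hypothetical recipe and worker): a package "foo" at version "1.0"
# whose only used variant key is python=3.9, built on a worker labelled "linux-64",
# yields "foo-1.0-python_3.9-on-linux-64"; with run='test' the same key becomes
# "c3itest-foo-1.0-python_3.9-on-linux-64".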
def _git_changed_files(git_rev, stop_rev=None, git_root=''):
if not git_root:
git_root = os.getcwd()
if stop_rev:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
print("Changed files from:", git_rev, stop_rev, git_root)
output = subprocess.check_output(['git', '-C', git_root, 'diff-tree',
'--no-commit-id', '--name-only', '-r', git_rev])
files = output.decode().splitlines()
return files
def _get_base_folders(base_dir, changed_files):
recipe_dirs = []
for f in changed_files:
# only consider files that come from folders
if '/' in f:
f = f.split('/')[0]
try:
find_recipe(os.path.join(base_dir, f))
recipe_dirs.append(f)
except IOError:
pass
return recipe_dirs
def git_changed_submodules(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
if stop_rev is not None:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
diff_script = pkg_resources.resource_filename('conda_concourse_ci', 'diff-script.sh')
diff = subprocess.check_output(['bash', diff_script, git_rev],
cwd=git_root, universal_newlines=True)
submodule_changed_files = [line.split() for line in diff.splitlines()]
submodules_with_recipe_changes = []
for submodule in submodule_changed_files:
for file in submodule:
if 'recipe/' in file and submodule[0] not in submodules_with_recipe_changes:
submodules_with_recipe_changes.append(submodule[0])
return submodules_with_recipe_changes
def git_new_submodules(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
if stop_rev is not None:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
new_submodule_script = pkg_resources.resource_filename('conda_concourse_ci',
'new-submodule-script.sh')
diff = subprocess.check_output(['bash', new_submodule_script, git_rev],
cwd=git_root, universal_newlines=True)
return diff.splitlines()
def git_renamed_folders(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
if stop_rev is not None:
git_rev = "{0}..{1}".format(git_rev, stop_rev)
rename_script = pkg_resources.resource_filename('conda_concourse_ci',
'rename-script.sh')
renamed_files = subprocess.check_output(['bash', rename_script], cwd=git_root,
universal_newlines=True).splitlines()
return renamed_files
def git_changed_recipes(git_rev='HEAD@{1}', stop_rev=None, git_root='.'):
"""
Get the list of files changed in a git revision and return a list of
package directories that have been modified.
git_rev: if stop_rev is not provided, this represents the changes
introduced by the given git rev. It is equivalent to
git_rev=SOME_REV@{1} and stop_rev=SOME_REV
stop_rev: when provided, this is the end of a range of revisions to
consider. git_rev becomes the start revision. Note that the
start revision is *one before* the actual start of examining
commits for changes. In other words:
git_rev=SOME_REV@{1} and stop_rev=SOME_REV => only SOME_REV
git_rev=SOME_REV@{2} and stop_rev=SOME_REV => two commits, SOME_REV and the
one before it
"""
changed_files = _git_changed_files(git_rev, stop_rev, git_root)
recipe_dirs = _get_base_folders(git_root, changed_files)
changed_submodules = git_changed_submodules(git_rev, stop_rev, git_root)
new_submodules = git_new_submodules(git_rev, stop_rev, git_root)
renamed_folders = git_renamed_folders(git_rev, stop_rev, git_root)
return recipe_dirs + changed_submodules + new_submodules + renamed_folders
def _deps_to_version_dict(deps):
d = {}
for x in deps:
x = x.strip().split()
if len(x) == 3:
d[x[0]] = (x[1], x[2])
elif len(x) == 2:
d[x[0]] = (x[1], 'any')
else:
d[x[0]] = ('any', 'any')
return d
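# Illustrative mapping (hypothetical spec strings):
#   ['numpy 1.21 py39*', 'python >=3.7', 'pip']
# becomes
#   {'numpy': ('1.21', 'py39*'), 'python': ('>=3.7', 'any'), 'pip': ('any', 'any')}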
def get_build_deps(meta):
build_reqs = meta.get_value('requirements/build')
if not build_reqs:
build_reqs = []
return _deps_to_version_dict(build_reqs)
def get_run_test_deps(meta):
run_reqs = meta.get_value('requirements/run')
if not run_reqs:
run_reqs = []
test_reqs = meta.get_value('test/requires')
if not test_reqs:
test_reqs = []
return _deps_to_version_dict(run_reqs + test_reqs)
_rendered_recipes = {}
@conda_interface.memoized
def _get_or_render_metadata(meta_file_or_recipe_dir, worker, finalize, config=None):
global _rendered_recipes
platform = worker['platform']
arch = str(worker['arch'])
if (meta_file_or_recipe_dir, platform, arch) not in _rendered_recipes:
print("rendering {0} for {1}".format(meta_file_or_recipe_dir, worker['label']))
_rendered_recipes[(meta_file_or_recipe_dir, platform, arch)] = \
api.render(meta_file_or_recipe_dir, platform=platform, arch=arch,
verbose=False, permit_undefined_jinja=True,
bypass_env_check=True, config=config, finalize=finalize)
return _rendered_recipes[(meta_file_or_recipe_dir, platform, arch)]
def add_recipe_to_graph(recipe_dir, graph, run, worker, conda_resolve,
recipes_dir=None, config=None, finalize=False):
try:
rendered = _get_or_render_metadata(recipe_dir, worker, config=config, finalize=finalize)
except (IOError, SystemExit) as e:
log.exception('invalid recipe dir: %s', recipe_dir)
raise
name = None
for (metadata, _, _) in rendered:
name = package_key(metadata, worker['label'], run)
if metadata.skip():
continue
if name not in graph.nodes():
graph.add_node(name, meta=metadata, worker=worker)
add_dependency_nodes_and_edges(name, graph, run, worker, conda_resolve, config=config,
recipes_dir=recipes_dir, finalize=finalize)
# # add the test equivalent at the same time. This is so that expanding can find it.
# if run == 'build':
# add_recipe_to_graph(recipe_dir, graph, 'test', worker, conda_resolve,
# recipes_dir=recipes_dir)
# test_key = package_key(metadata, worker['label'])
# graph.add_edge(test_key, name)
# upload_key = package_key(metadata, worker['label'])
# graph.add_node(upload_key, meta=metadata, worker=worker)
# graph.add_edge(upload_key, test_key)
return name
def match_peer_job(target_matchspec, other_m, this_m=None):
"""target_matchspec comes from the recipe. target_variant is the variant from the recipe whose
deps we are matching. m is the peer job, which must satisfy conda and also have matching keys
for any keys that are shared between target_variant and m.config.variant"""
match_dict = {'name': other_m.name(),
'version': other_m.version(),
'build': _fix_any(other_m.build_id(), other_m.config), }
if conda_interface.conda_43:
match_dict = conda_interface.Dist(name=match_dict['name'],
dist_name='-'.join((match_dict['name'],
match_dict['version'],
match_dict['build'])),
version=match_dict['version'],
build_string=match_dict['build'],
build_number=int(other_m.build_number() or 0),
channel=None)
matchspec_matches = target_matchspec.match(match_dict)
variant_matches = True
if this_m:
other_m_used_vars = other_m.get_used_loop_vars()
for v in this_m.get_used_loop_vars():
if v in other_m_used_vars:
variant_matches &= this_m.config.variant[v] == other_m.config.variant[v]
return matchspec_matches and variant_matches
def add_intradependencies(graph):
"""ensure that downstream packages wait for upstream build/test (not use existing
available packages)"""
for node in graph.nodes():
if 'meta' not in graph.nodes[node]:
continue
# get build dependencies
m = graph.nodes[node]['meta']
# this is pretty hard. Realistically, we would want to know
# what the build and host platforms are on the build machine.
# However, all we know right now is what machine we're actually
# on (the one calculating the graph).
test_requires = m.meta.get('test', {}).get('requires', [])
log.info("node: {}".format(node))
log.info(" build: {}".format(m.ms_depends('build')))
log.info(" host: {}".format(m.ms_depends('host')))
log.info(" run: {}".format(m.ms_depends('run')))
log.info(" test: {}".format(test_requires))
deps = set(m.ms_depends('build') + m.ms_depends('host') + m.ms_depends('run') +
[conda_interface.MatchSpec(dep) for dep in test_requires or []])
for dep in deps:
name_matches = (n for n in graph.nodes() if graph.nodes[n]['meta'].name() == dep.name)
for matching_node in name_matches:
# are any of these build dependencies also nodes in our graph?
if (match_peer_job(conda_interface.MatchSpec(dep),
graph.nodes[matching_node]['meta'],
m) and
(node, matching_node) not in graph.edges()):
# add edges if they don't already exist
graph.add_edge(node, matching_node)
def collapse_subpackage_nodes(graph):
"""Collapse all subpackage nodes into their parent recipe node
We get one node per output, but a given recipe can have multiple outputs. It's important
for dependency ordering in the graph that the outputs exist independently, but once those
dependencies are established, we need to collapse subpackages down to a single job for the
top-level recipe."""
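    # Illustrative (hypothetical recipe): a recipe at recipes/foo whose top-level name is
    # "foo" and which declares extra outputs "foo-core" and "foo-tools" initially yields
    # one node per output; edges pointing at the "foo-core"/"foo-tools" nodes are remapped
    # onto the master "foo" node, and the output-only nodes are then removed.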
# group nodes by their recipe path first, then within those groups by their variant
node_groups = {}
for node in graph.nodes():
if 'meta' in graph.nodes[node]:
meta = graph.nodes[node]['meta']
meta_path = meta.meta_path or meta.meta['extra']['parent_recipe']['path']
master = False
master_meta = MetaData(meta_path, config=meta.config)
if master_meta.name() == meta.name():
master = True
group = node_groups.get(meta_path, {})
subgroup = group.get(HashableDict(meta.config.variant), {})
if master:
if 'master' in subgroup:
raise ValueError("tried to set more than one node in a group as master")
subgroup['master'] = node
else:
sps = subgroup.get('subpackages', [])
sps.append(node)
subgroup['subpackages'] = sps
group[HashableDict(meta.config.variant)] = subgroup
node_groups[meta_path] = group
for recipe_path, group in node_groups.items():
for variant, subgroup in group.items():
# if no node is the top-level recipe (only outputs, no top-level output), need to obtain
# package/name from recipe given by common recipe path.
subpackages = subgroup.get('subpackages')
if 'master' not in subgroup:
sp0 = graph.nodes[subpackages[0]]
master_meta = MetaData(recipe_path, config=sp0['meta'].config)
worker = sp0['worker']
master_key = package_key(master_meta, worker['label'])
graph.add_node(master_key, meta=master_meta, worker=worker)
master = graph.nodes[master_key]
else:
master = subgroup['master']
master_key = package_key(graph.nodes[master]['meta'],
graph.nodes[master]['worker']['label'])
# fold in dependencies for all of the other subpackages within a group. This is just
# the intersection of the edges between all nodes. Store this on the "master" node.
if subpackages:
remap_edges = [edge for edge in graph.edges() if edge[1] in subpackages]
for edge in remap_edges:
# make sure not to add references to yourself
if edge[0] != master_key:
graph.add_edge(edge[0], master_key)
graph.remove_edge(*edge)
# remove nodes that have been folded into master nodes
for subnode in subpackages:
graph.remove_node(subnode)
def construct_graph(recipes_dir, worker, run, conda_resolve, folders=(),
git_rev=None, stop_rev=None, matrix_base_dir=None,
config=None, finalize=False):
'''
Construct a directed graph of dependencies from a directory of recipes
run: whether to use build or run/test requirements for the graph. Avoids cycles.
          values: 'build' or 'test'. Only 'build' is treated specially; any other
                  value selects the run/test requirements.
'''
matrix_base_dir = matrix_base_dir or recipes_dir
if not os.path.isabs(recipes_dir):
recipes_dir = os.path.normpath(os.path.join(os.getcwd(), recipes_dir))
assert os.path.isdir(recipes_dir)
if not folders:
if not git_rev:
git_rev = 'HEAD'
folders = git_changed_recipes(git_rev, stop_rev=stop_rev,
git_root=recipes_dir)
graph = nx.DiGraph()
for folder in folders:
recipe_dir = os.path.join(recipes_dir, folder)
if not os.path.isdir(recipe_dir):
raise ValueError("Specified folder {} does not exist".format(recipe_dir))
add_recipe_to_graph(recipe_dir, graph, run, worker, conda_resolve,
recipes_dir, config=config, finalize=finalize)
add_intradependencies(graph)
collapse_subpackage_nodes(graph)
return graph
def _fix_any(value, config):
value = re.sub('any(?:h[0-9a-f]{%d})?' % config.hash_length, '', value)
return value
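# Illustrative (assuming config.hash_length == 7): both "any" and "anyh1234567" are
# stripped to "", while a concrete build string such as "py39h1234567_0" is returned
# unchanged.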
@conda_interface.memoized
def _installable(name, version, build_string, config, conda_resolve):
"""Can Conda install the package we need?"""
ms = conda_interface.MatchSpec(" ".join([name, _fix_any(version, config),
_fix_any(build_string, config)]))
installable = conda_resolve.find_matches(ms)
if not installable:
log.warn("Dependency {name}, version {ver} is not installable from your "
"channels: {channels} with subdir {subdir}. Seeing if we can build it..."
.format(name=name, ver=version, channels=config.channel_urls,
subdir=config.host_subdir))
return installable
def _buildable(name, version, recipes_dir, worker, config, finalize):
"""Does the recipe that we have available produce the package we need?"""
possible_dirs = os.listdir(recipes_dir)
packagename_re = re.compile(r'%s(?:\-[0-9]+[\.0-9\_\-a-zA-Z]*)?$' % name)
likely_dirs = (dirname for dirname in possible_dirs if
(os.path.isdir(os.path.join(recipes_dir, dirname)) and
packagename_re.match(dirname)))
metadata_tuples = [m for path in likely_dirs
for (m, _, _) in _get_or_render_metadata(os.path.join(recipes_dir,
path), worker, finalize=finalize)]
# this is our target match
ms = conda_interface.MatchSpec(" ".join([name, _fix_any(version, config)]))
available = False
for m in metadata_tuples:
available = match_peer_job(ms, m)
if available:
break
return m.meta_path if available else False
def add_dependency_nodes_and_edges(node, graph, run, worker, conda_resolve, recipes_dir=None,
finalize=False, config=None):
'''add build nodes for any upstream deps that are not yet installable
changes graph in place.
'''
metadata = graph.nodes[node]['meta']
# for plain test runs, ignore build reqs.
deps = get_run_test_deps(metadata)
recipes_dir = recipes_dir or os.getcwd()
# cross: need to distinguish between build_subdir (build reqs) and host_subdir
if run == 'build':
deps.update(get_build_deps(metadata))
for dep, (version, build_str) in deps.items():
# we don't need worker info in _installable because it is already part of conda_resolve
if not _installable(dep, version, build_str, metadata.config, conda_resolve):
recipe_dir = _buildable(dep, version, recipes_dir, worker, metadata.config,
finalize=finalize)
if not recipe_dir:
continue
# raise ValueError("Dependency {} is not installable, and recipe (if "
# " available) can't produce desired version ({})."
# .format(dep, version))
dep_name = add_recipe_to_graph(recipe_dir, graph, 'build', worker,
conda_resolve, recipes_dir, config=config, finalize=finalize)
if not dep_name:
raise ValueError("Tried to build recipe {0} as dependency, which is skipped "
"in meta.yaml".format(recipe_dir))
graph.add_edge(node, dep_name)
def expand_run_upstream(graph, conda_resolve, worker, run, steps=0, max_downstream=5,
recipes_dir=None, matrix_base_dir=None):
pass
def expand_run(graph, conda_resolve, worker, run, steps=0, max_downstream=5,
recipes_dir=None, matrix_base_dir=None, finalize=False):
"""Apply the build label to any nodes that need (re)building or testing.
"need rebuilding" means both packages that our target package depends on,
but are not yet built, as well as packages that depend on our target
package. For the latter, you can specify how many dependencies deep (steps)
to follow that chain, since it can be quite large.
If steps is -1, all downstream dependencies are rebuilt or retested
"""
downstream = 0
initial_nodes = len(graph.nodes())
# for build, we get test automatically. Give people the max_downstream in terms
# of packages, not tasks
# if run == 'build':
# max_downstream *= 2
def expand_step(task_graph, full_graph, downstream):
for node in task_graph.nodes():
for predecessor in full_graph.predecessors(node):
if max_downstream < 0 or (downstream - initial_nodes) < max_downstream:
add_recipe_to_graph(
os.path.dirname(full_graph.nodes[predecessor]['meta'].meta_path),
task_graph, run=run, worker=worker, conda_resolve=conda_resolve,
recipes_dir=recipes_dir, finalize=finalize)
downstream += 1
return len(graph.nodes())
# starting from our initial collection of dirty nodes, trace the tree down to packages
# that depend on the dirty nodes. These packages may need to be rebuilt, or perhaps
# just tested. The 'run' argument determines which.
if steps != 0:
if not recipes_dir:
raise ValueError("recipes_dir is necessary if steps != 0. "
"Please pass it as an argument.")
# here we need to fully populate a graph that has the right build or run/test deps.
# We don't create this elsewhere because it is unnecessary and costly.
# get all immediate subdirectories
other_top_dirs = [d for d in os.listdir(recipes_dir)
if os.path.isdir(os.path.join(recipes_dir, d)) and
not d.startswith('.')]
recipe_dirs = []
for recipe_dir in other_top_dirs:
try:
find_recipe(os.path.join(recipes_dir, recipe_dir))
recipe_dirs.append(recipe_dir)
except IOError:
pass
# constructing the graph for build will automatically also include the test deps
full_graph = construct_graph(recipes_dir, worker, 'build', folders=recipe_dirs,
matrix_base_dir=matrix_base_dir, conda_resolve=conda_resolve)
if steps >= 0:
for step in range(steps):
downstream = expand_step(graph, full_graph, downstream)
else:
while True:
nodes = graph.nodes()
downstream = expand_step(graph, full_graph, downstream)
if nodes == graph.nodes():
break
def order_build(graph):
'''
    Return a build order for the graph: cyclic build/test dependencies are first
    reordered (see reorder_cyclical_test_dependencies), then the graph is
    topologically sorted and the order is reversed so that upstream packages come
    before the packages that depend on them.
'''
reorder_cyclical_test_dependencies(graph)
try:
order = list(nx.topological_sort(graph))
order.reverse()
except nx.exception.NetworkXUnfeasible:
raise ValueError("Cycles detected in graph: %s", nx.find_cycle(graph,
orientation='reverse'))
return order
def reorder_cyclical_test_dependencies(graph):
"""By default, we make things that depend on earlier outputs for build wait for tests of
the earlier thing to pass. However, circular dependencies spread across run/test and
build/host can make this approach incorrect. For example:
A <-- B : B depends on A at build time
B <-- A : A depends on B at run time. We can build A before B, but we cannot test A until B
is built.
To resolve this, we must reorder the graph edges:
build A <-- test A <--> build B <-- test B
must become:
build A <-- build B <-- test A <-- test B
"""
# find all test nodes with edges to build nodes
test_nodes = [node for node in graph.nodes() if node.startswith('test-')]
edges_from_test_to_build = [edge for edge in graph.edges() if edge[0] in test_nodes and
edge[1].startswith('build-')]
# find any of their inverses. Entries here are of the form (test-A, build-B)
circular_deps = [edge for edge in edges_from_test_to_build
if (edge[1], edge[0]) in graph.edges()]
for (testA, buildB) in circular_deps:
# remove build B dependence on test A
graph.remove_edge(testA, buildB)
# remove test B dependence on build B
testB = buildB.replace('build-', 'test-', 1)
graph.remove_edge(buildB, testB)
# Add test B dependence on test A
graph.add_edge(testA, testB)
# make sure that test A still depends on build B
assert (buildB, testA) in graph.edges()
# graph is modified in place. No return necessary.
| staged-recipes-main | .ci_support/compute_build_graph.py |
import conda_build.conda_interface
import networkx as nx
import conda_build.api
from compute_build_graph import construct_graph
import argparse
import os
from collections import OrderedDict
import sys
import subprocess
import yaml
try:
from ruamel_yaml import BaseLoader, load
except ImportError:
from yaml import BaseLoader, load
def get_host_platform():
from sys import platform
if platform == "linux" or platform == "linux2":
return "linux"
elif platform == "darwin":
return "osx"
elif platform == "win32":
return "win"
def build_all(recipes_dir, arch):
folders = os.listdir(recipes_dir)
old_comp_folders = []
new_comp_folders = []
if not folders:
print("Found no recipes to build")
return
platform = get_host_platform()
script_dir = os.path.dirname(os.path.realpath(__file__))
variant_config_file = os.path.join(script_dir, '{}{}.yaml'.format(
platform, arch))
found_cuda = False
found_centos7 = False
for folder in folders:
meta_yaml = os.path.join(recipes_dir, folder, "meta.yaml")
if os.path.exists(meta_yaml):
            with open(meta_yaml, "r", encoding="utf-8") as f:
text = ''.join(f.readlines())
if 'cuda' in text:
found_cuda = True
if 'sysroot_linux-64' in text:
found_centos7 = True
if found_cuda:
print('##vso[task.setvariable variable=NEED_CUDA;isOutput=true]1')
if found_centos7:
print('##vso[task.setvariable variable=NEED_CENTOS7;isOutput=true]1')
deployment_version = (0, 0)
sdk_version = (0, 0)
for folder in folders:
cbc = os.path.join(recipes_dir, folder, "conda_build_config.yaml")
if os.path.exists(cbc):
with open(cbc, "r") as f:
text = ''.join(f.readlines())
if platform == 'osx' and (
'MACOSX_DEPLOYMENT_TARGET' in text or
'MACOSX_SDK_VERSION' in text):
config = load(text, Loader=BaseLoader)
if 'MACOSX_DEPLOYMENT_TARGET' in config:
for version in config['MACOSX_DEPLOYMENT_TARGET']:
version = tuple([int(x) for x in version.split('.')])
deployment_version = max(deployment_version, version)
if 'MACOSX_SDK_VERSION' in config:
for version in config['MACOSX_SDK_VERSION']:
version = tuple([int(x) for x in version.split('.')])
sdk_version = max(sdk_version, deployment_version, version)
with open(variant_config_file, 'r') as f:
variant_text = ''.join(f.readlines())
if deployment_version != (0, 0):
deployment_version = '.'.join([str(x) for x in deployment_version])
print("Overriding MACOSX_DEPLOYMENT_TARGET to be ", deployment_version)
variant_text += '\nMACOSX_DEPLOYMENT_TARGET:\n'
variant_text += f'- {deployment_version}\n'
if sdk_version != (0, 0):
sdk_version = '.'.join([str(x) for x in sdk_version])
print("Overriding MACOSX_SDK_VERSION to be ", sdk_version)
variant_text += '\nMACOSX_SDK_VERSION:\n'
variant_text += f'- {sdk_version}\n'
with open(variant_config_file, 'w') as f:
f.write(variant_text)
if platform == "osx" and (sdk_version != (0, 0) or deployment_version != (0, 0)):
subprocess.run("run_conda_forge_build_setup", shell=True, check=True)
print("Building {} with conda-forge/label/main".format(','.join(folders)))
channel_urls = ['local', 'conda-forge']
build_folders(recipes_dir, folders, arch, channel_urls)
def get_config(arch, channel_urls):
exclusive_config_file = os.path.join(conda_build.conda_interface.root_dir,
'conda_build_config.yaml')
platform = get_host_platform()
script_dir = os.path.dirname(os.path.realpath(__file__))
variant_config_files = []
variant_config_file = os.path.join(script_dir, '{}{}.yaml'.format(
platform, arch))
if os.path.exists(variant_config_file):
variant_config_files.append(variant_config_file)
error_overlinking = (get_host_platform() != "win")
config = conda_build.api.Config(
variant_config_files=variant_config_files, arch=arch,
exclusive_config_file=exclusive_config_file, channel_urls=channel_urls,
error_overlinking=error_overlinking)
return config
def build_folders(recipes_dir, folders, arch, channel_urls):
index_path = os.path.join(sys.exec_prefix, 'conda-bld')
os.makedirs(index_path, exist_ok=True)
conda_build.api.update_index(index_path)
index = conda_build.conda_interface.get_index(channel_urls=channel_urls)
conda_resolve = conda_build.conda_interface.Resolve(index)
config = get_config(arch, channel_urls)
platform = get_host_platform()
worker = {'platform': platform, 'arch': arch,
'label': '{}-{}'.format(platform, arch)}
G = construct_graph(recipes_dir, worker=worker, run='build',
conda_resolve=conda_resolve, folders=folders,
config=config, finalize=False)
order = list(nx.topological_sort(G))
order.reverse()
print('Computed that there are {} distributions to build from {} recipes'
.format(len(order), len(folders)))
if not order:
print('Nothing to do')
return
print("Resolved dependencies, will be built in the following order:")
print(' '+'\n '.join(order))
d = OrderedDict()
for node in order:
d[G.nodes[node]['meta'].meta_path] = 1
for recipe in d.keys():
conda_build.api.build([recipe], config=get_config(arch, channel_urls))
def check_recipes_in_correct_dir(root_dir, correct_dir):
from pathlib import Path
for path in Path(root_dir).rglob('meta.yaml'):
path = path.absolute().relative_to(root_dir)
if path.parts[0] != correct_dir:
raise RuntimeError(f"recipe {path.parts} in wrong directory")
if len(path.parts) != 3:
raise RuntimeError(f"recipe {path.parts} in wrong directory")
def read_mambabuild(recipes_dir):
folders = os.listdir(recipes_dir)
use_it = True
for folder in folders:
cf = os.path.join(recipes_dir, folder, "conda-forge.yml")
if os.path.exists(cf):
with open(cf, "r") as f:
cfy = yaml.safe_load(f.read())
use_it = use_it and cfy.get("build_with_mambabuild", True)
return use_it
def use_mambabuild():
from boa.cli.mambabuild import prepare
prepare()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--arch', default='64',
help='target architecture (64 or 32)')
args = parser.parse_args()
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
check_recipes_in_correct_dir(root_dir, "recipes")
use_mamba = read_mambabuild(os.path.join(root_dir, "recipes"))
if use_mamba:
use_mambabuild()
subprocess.run("conda clean --all --yes", shell=True, check=True)
build_all(os.path.join(root_dir, "recipes"), args.arch)
| staged-recipes-main | .ci_support/build_all.py |
import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['lightweight_gan']
from version import __version__
setup(
name = 'lightweight-gan',
packages = find_packages(),
entry_points={
'console_scripts': [
'lightweight_gan = lightweight_gan.cli:main',
],
},
version = __version__,
license='MIT',
description = 'Lightweight GAN',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/lightweight-gan',
keywords = [
'artificial intelligence',
'deep learning',
'generative adversarial networks'
],
install_requires=[
'adabelief-pytorch',
'einops>=0.3',
'fire',
'kornia>=0.5.4',
'numpy',
'pillow',
'retry',
'torch>=1.10',
'torchvision',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
) | lightweight-gan-main | setup.py |
import random
import torch
import torch.nn.functional as F
def DiffAugment(x, types=[]):
for p in types:
for f in AUGMENT_FNS[p]:
x = f(x)
return x.contiguous()
# """
# Augmentation functions got images as `x`
# where `x` is tensor with this dimensions:
# 0 - count of images
# 1 - channels
# 2 - width
# 3 - height of image
# """
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
indexing = 'ij')
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2)
return x
def rand_offset(x, ratio=1, ratio_h=1, ratio_v=1):
w, h = x.size(2), x.size(3)
imgs = []
for img in x.unbind(dim = 0):
max_h = int(w * ratio * ratio_h)
max_v = int(h * ratio * ratio_v)
value_h = random.randint(0, max_h) * 2 - max_h
value_v = random.randint(0, max_v) * 2 - max_v
if abs(value_h) > 0:
img = torch.roll(img, value_h, 2)
if abs(value_v) > 0:
img = torch.roll(img, value_v, 1)
imgs.append(img)
return torch.stack(imgs)
def rand_offset_h(x, ratio=1):
return rand_offset(x, ratio=1, ratio_h=ratio, ratio_v=0)
def rand_offset_v(x, ratio=1):
return rand_offset(x, ratio=1, ratio_h=0, ratio_v=ratio)
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
indexing = 'ij')
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'offset': [rand_offset],
'offset_h': [rand_offset_h],
'offset_v': [rand_offset_v],
'translation': [rand_translation],
'cutout': [rand_cutout],
}
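# Minimal smoke-test sketch (not part of the original module; the tensor shape and the
# `types` list below are illustrative only): runs DiffAugment on a random batch and
# checks that the image dimensions are preserved.
if __name__ == '__main__':
    _images = torch.rand(8, 3, 64, 64)
    _augmented = DiffAugment(_images, types=['color', 'translation', 'cutout'])
    assert _augmented.shape == _images.shape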
| lightweight-gan-main | lightweight_gan/diff_augment.py |
__version__ = '1.1.1'
| lightweight-gan-main | lightweight_gan/version.py |
from lightweight_gan.lightweight_gan import LightweightGAN, Generator, Discriminator, Trainer, NanException
from kornia.filters import filter2d
| lightweight-gan-main | lightweight_gan/__init__.py |