python_code stringlengths 0 83.2k |
|---|
# first 3 functions taken from:
# http://www.johnvinyard.com/blog/?p=268
import numpy as np
from numpy.lib.stride_tricks import as_strided as ast
# from .arrays import normalizeMat
def norm_shape(shape):
'''
Normalize numpy array shapes so they're always expressed as a tuple,
even for one-dimensional s... |
#!#!/bin/env/python
from __future__ import print_function
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from .utils import kmeans
from joblib import Memory
_memory = Memory('.', verbose=0)
def _to_np(A):
return A.cpu().detach().numpy()
def _class_balanced_sampli... |
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
# TODO this file is hideous (but necessarily so for deadline purposes...)
#
# Also, this file is tightly coupled to figs.py; it basically has a func
# for each figure func that spits out data in exactly the required form
MCQ_RESULTS_DIR = '../re... |
#!/bin/env/python
import functools
import numpy as np
import pprint
import scipy
import time
from . import amm
from . import matmul_datasets as md
from . import pyience as pyn
from . import compress
from . import amm_methods as methods
from joblib import Memory
_memory = Memory('.', verbose=0)
# NUM_TRIALS = 1
NU... |
#!/bin/env/python
import abc
import numpy as np
# from sklearn.decomposition import PCA, SparsePCA
from sklearn import decomposition
from sklearn.decomposition import PCA, SparsePCA, MiniBatchSparsePCA
from sklearn.utils.extmath import randomized_svd
import numba # conda install numba
# import ffht # https://github... |
#!/bin/env/python
import numpy as np
def energy(A):
    """Return the total squared deviation of A's rows from their column means.

    Degenerate inputs (fewer than 2 rows, or fewer than 2 dimensions) are
    defined to have zero energy.
    """
    if A.ndim < 2 or len(A) < 2:
        return 0
    centered = A - A.mean(axis=0)
    return np.sum(np.square(centered))
def run_trial(N=100, D=3, seed=None):
if seed is not None:
np.random.seed(seed)
w0, w = np.random.randn(2, D)
X = np.random.ran... |
#!/bin/env/python
import collections
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pathlib as pl
# from . import files
from . import amm_results2 as res
# from . import amm_methods as ameth
# sb.set_context('poster')
# sb.set_con... |
#!/bin/env/python
from . import amm, vq_amm
METHOD_EXACT = 'Exact'
METHOD_SCALAR_QUANTIZE = 'ScalarQuantize'
METHOD_SKETCH_SQ_SAMPLE = 'SketchSqSample'
METHOD_SVD = 'SVD' # truncated SVD run on the matrix at test time
METHOD_FD_AMM = 'FD-AMM'
METHOD_COOCCUR = 'CooccurSketch'
METHOD_PCA = 'PCA' # PCA projection, wit... |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import pprint
microbench_output = \
"""
ncodebooks = 4
amm bolt N, D, M, ncodebooks: 10000, 512, 10, 4 (5x20): 7.574 (4.225e+07/s), 7.582 (4.221e+07/s), 7.584 (4.219e+07/s), 7.587 (4.218e+07/s), 7.579 (4.222e+07/s),
amm bolt N, D, M,... |
#!/usr/bin/env python
import itertools
import numpy as np
from sklearn import cluster
from scipy import signal
# import types
import kmc2 # state-of-the-art kmeans initialization (as of NIPS 2016)
from joblib import Memory
_memory = Memory('.', verbose=0)
# ========================================================... |
#!/usr/bin/env python
import numpy as np
import numba
import zstandard as zstd # pip install zstandard
# ================================================================ Funcs
def nbits_cost(diffs, signed=True):
"""
>>> [nbits_cost(i) for i in [0, 1, 2, 3, 4, 5, 7, 8, 9]]
[0, 2, 3, 3, 4, 4, 4, 5, 5]
... |
#!/usr/bin/env python
from __future__ import print_function
import os
import numpy as np
import pandas as pd
from io import StringIO
from . import amm_methods as methods
from joblib import Memory
_memory = Memory('.', verbose=1)
pd.options.mode.chained_assignment = None # suppress stupid warning
RESULTS_DIR = o... |
#!/usr/bin/env python
import abc
import numpy as np
from . import vquantizers as vq
from . import amm
KEY_NLOOKUPS = 'nlookups'
class VQMatmul(amm.ApproxMatmul, abc.ABC):
def __init__(self, ncodebooks, ncentroids=None):
self.ncodebooks = ncodebooks
self.ncentroids = (self._get_ncentroids() if n... |
#!/bin/env/python
"""utility functions for running experiments"""
from __future__ import print_function, absolute_import
import datetime
import os
import itertools
import warnings
import numpy as np
import pandas as pd
import sys
import sklearn
# from sklearn.model_selection import StratifiedKFold
from python.file... |
#!/usr/bin/env python
import functools
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.stats.stats import pearsonr
import seaborn as sb
import time
from collections import namedtuple
# import datasets
import files
import product_quantize as pq
import pyience as pyn
from datasets import neigh... |
#!/bin/env/python
import copy
import numpy as np
from functools import reduce
import numba
from sklearn.decomposition import PCA
from sklearn import linear_model
from . import subspaces as subs
from joblib import Memory
_memory = Memory('.', verbose=0)
# def bucket_id_to_new_bucket_ids(old_id):
# i = 2 * old_i... |
#!/usr/bin/env python
from __future__ import division, absolute_import
import abc
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
from . import product_quantize as pq
from . import subspaces as subs
from . import clusterize
from .utils import kmeans
# =======================================... |
#!/bin/env/python
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sb
import pandas as pd
import pathlib as pl
# from . import files
from . import amm_results as res
from . import amm_methods as ameth
sb.set_context('poster')
# sb.set_context('talk')
# sb.set_cmap('tab10')
RESULTS_DIR... |
#!/usr/bin/env python
import time
import numpy as np
from .utils import kmeans, orthonormalize_rows, random_rotation
from joblib import Memory
_memory = Memory('.', verbose=0)
# ================================================================ PQ
@_memory.cache
def learn_pq(X, ncentroids, nsubvects, subvect_len, m... |
#!/bin/env/python
import os
import shutil
def ls(dir='.'):
    """List the entries of directory `dir`, like a bare shell `ls`."""
    contents = os.listdir(dir)
    return contents
def is_hidden(path):
    """True if the final path component is dot-prefixed (Unix hidden-file convention)."""
    name = os.path.basename(path)
    return name.startswith('.')
def is_visible(path):
    """Return True if `path` is not a hidden (dot-prefixed) file or directory."""
    return not is_hidden(path)
def join_paths(dir, contents):
    """Prefix each entry of `contents` with directory `dir`."""
    return [os.path.join(dir, entry) for entry in contents]
def files_matchi... |
#!/bin/env python
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import PIL
from PIL import Image
from PIL import ImageOps # can't just do PIL.ImageOps for some reason
from . import files
# ================================ TODO rm duplicate code from imagenet.py
# ... |
#!/bin/env python
from __future__ import print_function
import numpy as np
import os
import warnings
import h5py
from sklearn.datasets import load_digits
import keras
from keras.preprocessing import image
# from python import imagenet, svhn, caltech
# from python.datasets import caltech
from . import imagenet
from .... |
#!/bin/env python
from __future__ import division, print_function
import numpy as np
# import pyedflib as edf # pip install pyedflib
# import mne
from . import paths
from . import files
ECG_DIR = paths.UCD_ECG
NUM_RECORDINGS = 25
def main():
pass
print("ecg dir: ", ECG_DIR)
fpaths = files.list_files(... |
#!/usr/env/python
import os
# Root directory under which all datasets are expected to live.
DATASETS_DIR = os.path.expanduser("~/Desktop/datasets/")
def to_path(*args):
    """Resolve path components relative to DATASETS_DIR."""
    return os.path.join(DATASETS_DIR, *args)
# straightforward datasets
MSRC_12 = to_path('MSRC-12', 'origData')
UCR = to_path('ucr/UCRArchive_2018')
UCR_INFO = to_path('ucr/DataSummary.csv')
UWAVE = to_path('... |
#!/bin/env python
import os
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from joblib import Memory
_memory = Memory('.', verbose=1)
DATA_DIR = os.path.expanduser('~/Desktop/datasets/nn-search')
join = os.path.join
class Random:
UNIFORM = 'uniform'
GAUSS = 'gauss'
WALK = ... |
#!/usr/bin/env python
import os
# import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sb
from joblib import Memory
from . import paths
from . import files
_memory = Memory('./')
def _list_csvs(directory):
return files.list_files(directory, endswith=... |
#!/bin/env python
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import PIL
import pickle
import psutil # pip install psutil
import shutil
import sys # just for stderr for warnings
# import warnings
from PIL import Image
from python import files
from python import im... |
#!/usr/bin/env/python
import os
import numpy as np
from joblib import Memory
import pandas as pd
from . import paths
_memory = Memory('.', verbose=1, compress=9)
UCR_DATASETS_DIR = paths.UCR
UCR_INFO_PATH = paths.UCR_INFO
# ================================================================
# Public
# =============... |
#!/bin/env python
from __future__ import absolute_import, division, print_function
from scipy import io
import numpy as np
import os
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR = '../datasets/svhn'
TRAIN_PATH = os.path.join(DATADIR, 'train_32x32.mat')
TEST_PATH = os.path.join(DATADIR, 'test_... |
#!/bin/env/python
"""utility functions for data munging"""
from __future__ import absolute_import, division, print_function
import numpy as np
import sklearn
def split_train_test(X, Y, train_frac=.8, random_state=123):
"""Returns X_train, X_test, y_train, y_test"""
np.random.seed(123)
return sklearn.mo... |
#!/bin/env python
# Load 3-lead ECG recordings from SHAREE Database:
# https://physionet.org/content/shareedb/1.0.0/
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from . import paths
from . import files
from joblib import Memory
_memory = Memory('.', v... |
#!/bin/env python
# Load 3-lead ECG recordings from SHAREE Database:
# https://physionet.org/content/shareedb/1.0.0/
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
import os
from . import paths
from . import files
from joblib import Memory
_memory = Memory('.', v... |
#!/bin/env python
# from __future__ import absolute_import, division, print_function
from __future__ import division, print_function
import numpy as np
from . import paths
from . import image_utils as imgs
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR_101 = paths.CALTECH_101
DATADIR_256 = pat... |
#!/bin/env python
from __future__ import absolute_import, division, print_function
import numpy as np
from python import image_utils as imgs
from joblib import Memory
_memory = Memory('.', verbose=1)
DATADIR_101 = '../datasets/caltech/101_ObjectCategories'
def main():
import matplotlib.pyplot as plt
# c... |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
from sklearn.datasets import load_digits
import timeit
import bolt
# ================================================================ utils
def _dists_sq(X, q):
diffs = X - q
return np.sum(diffs * diffs, axis=-1)
def _dists_l... |
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2012 Keir Mierle <mierle@gmail.com>
#
# This Source Code Form is subject to the terms of the Mozilla
# Public License v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://m... |
# Intentionally empty
|
# -*- coding: utf-8 -*-
# This file is part of Eigen, a lightweight C++ template library
# for linear algebra.
#
# Copyright (C) 2009 Benjamin Schindler <bschindler@inf.ethz.ch>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
... |
from attention_tensorflow_mesh.attention_tensorflow_mesh import transformer_lm, transformer, attention |
import math
import mesh_tensorflow as mtf
import tensorflow.compat.v1 as tf
# helpers
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    if val is None:
        return d
    return val
# simple linear layer
def linear(x, dim_out, scope = 'linear', bias = True):
with tf.variable_scope(scope):
*_, dim_in = x.shape
w_init_stdev = 1 / ... |
# helpers
def exists(val):
    """True when `val` holds anything other than None."""
    return not (val is None)
def lcm(*numbers):
    """Least common multiple of the given integers (1 for an empty argument list).

    Uses floor division so the whole computation stays in exact integer
    arithmetic; the former float true-division silently lost precision for
    operands larger than ~2**53.
    """
    return int(functools.reduce(lambda x, y: (x * y) // gcd(x, y), numbers, 1))
def masked_mean(tensor, mask, dim = -1):
diff_len = len(tensor.shape) - len(mask.shape)
mask = mask[(..., *((None,) * diff_len))]
tensor.masked_fi... |
"""
Bonito Aligner
"""
def align_map(aligner, sequences, n_thread=4):
    """
    Align `sequences` with minimap using `n_thread` threads.
    """
    make_worker = partial(MappyWorker, aligner)
    return ThreadMap(make_worker, sequences, n_thread)
class MappyWorker(Thread):
"""
Process that reads items from an input_queue, applies a... |
"""
Bonito Fast5 Utils
"""
class Read:
def __init__(self, read, filename):
self.read_id = read.read_id
self.filename = filename.name
self.run_id = read.get_run_id()
if type(self.run_id) in (bytes, np.bytes_):
self.run_id = self.run_id.decode()
read_attrs = ... |
"""
Bonito utils
"""
try:
from claragenomics.bindings import cuda
from claragenomics.bindings.cudapoa import CudaPoaBatch
except ImportError:
pass
__dir__ = os.path.dirname(os.path.realpath(__file__))
__data__ = os.path.join(__dir__, "data")
__models__ = os.path.join(__dir__, "models")
__configs__ = os... |
"""
Bonito nn modules.
"""
layers = {}  # registry: lowercased class name -> layer class
def register(layer):
    """Class decorator: record `layer` in the global registry keyed by its lowercased name."""
    key = layer.__name__.lower()
    layer.name = key
    layers[key] = layer
    return layer
register(torch.nn.ReLU)
register(torch.nn.Tanh)
@register
class Swish(torch.nn.SiLU):
pass
@register
class Serial(torch.nn.Sequential):
def __init__(sel... |
"""
Bonito Input/Output
"""
logger = getLogger('bonito')
class CSVLogger:
def __init__(self, filename, sep=','):
self.filename = str(filename)
if os.path.exists(self.filename):
with open(self.filename) as f:
self.columns = csv.DictReader(f).fieldnames
else:... |
modules = [
'basecaller', 'train', 'evaluate', 'view', 'convert', 'download', 'export', 'duplex',
]
__version__ = '0.4.0'
def main():
parser = ArgumentParser(
'bonito',
formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-v', '--version', action='version',... |
"""
Bonito Multiprocesing
"""
def process_iter(iterator, maxsize=1):
    """
    Take an iterator and run it on another process.
    """
    background = ProcessIterator(iterator, maxsize=maxsize)
    return iter(background)
def thread_iter(iterator, maxsize=1):
"""
Take an iterator and run it on another thread.
"""
return it... |
"""
Bonito train
"""
class ChunkDataSet:
def __init__(self, chunks, targets, lengths):
self.chunks = np.expand_dims(chunks, axis=1)
self.targets = targets
self.lengths = lengths
def __getitem__(self, i):
return (
self.chunks[i].astype(np.float32),
sel... |
"""
Bonito Download
"""
class File:
"""
Small class for downloading models and training assets.
"""
__url__ = "https://nanoporetech.box.com/shared/static/"
def __init__(self, path, url_frag, force):
self.path = path
self.force = force
self.url = os.path.join(self.__url... |
#!/usr/bin/env python
"""
Convert a Taiyaki chunkify training file to set of Bonito CTC .npy files
"""
def align(samples, pointers, reference):
""" align to the start of the mapping """
squiggle_duration = len(samples)
mapped_off_the_start = len(pointers[pointers < 0])
mapped_off_the_end = len(poin... |
"""
Bonito Export
"""
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif is... |
"""
Bonito model viewer - display a model architecture for a given config.
"""
def main(args):
    """Load the model described by `args.config` and print its architecture
    and total parameter count to stdout."""
    config = toml.load(args.config)
    Model = load_symbol(config, "Model")
    model = Model(config)
    print(model)
    print("Total parameters in model", sum(p.numel() for p in model.parameters()))
def argparser():
... |
"""
Bonito Basecaller
"""
def main(args):
if args.save_ctc and not args.reference:
sys.stderr.write("> a reference is needed to output ctc training data\n")
exit(1)
sys.stderr.write("> loading model\n")
model = load_model(args.model_directory, args.device, weights=int(args.weights))
... |
"""
Bonito Duplex consensus decoding.
https://www.biorxiv.org/content/10.1101/2020.02.25.956771v1
"""
def poagen(groups, gpu_percent=0.8):
free, total = cuda.cuda_get_mem_info(cuda.cuda_get_device())
gpu_mem_per_batch = gpu_percent * free
max_seq_sz = 0
max_sequences_per_poa = 0
for group i... |
#!/usr/bin/env python3
"""
Bonito training.
"""
def main(args):
workdir = os.path.expanduser(args.training_directory)
if os.path.exists(workdir) and not args.force:
print("[error] %s exists, use -f to force continue training." % workdir)
exit(1)
init(args.seed, args.device)
devic... |
"""
Bonito model evaluator
"""
def main(args):
poas = []
init(args.seed, args.device)
print("* loading data")
directory = args.directory
if os.path.exists(os.path.join(directory, 'validation')):
directory = os.path.join(directory, 'validation')
testdata = ChunkDataSet(
*... |
"""
Bonito CTC-CRF Model.
"""
def get_stride(m):
    """Return the stride of module `m`.

    A module with a `stride` attribute reports it directly (first element if
    it is a sequence); Convolution delegates to its inner conv; Serial
    multiplies its children's strides; anything else counts as stride 1.
    """
    if hasattr(m, 'stride'):
        stride = m.stride
        return stride if isinstance(stride, int) else stride[0]
    if isinstance(m, Convolution):
        return get_stride(m.conv)
    if isinstance(m, Serial):
        return int(np.prod([get_stride(child) for child in m]))
    return 1
... |
"""
Bonito CRF basecall
"""
def stitch(chunks, chunksize, overlap, length, stride, reverse=False):
"""
Stitch chunks together with a given overlap
"""
if isinstance(chunks, dict):
return {
k: stitch(v, chunksize, overlap, length, stride, reverse=reverse)
for k, v in c... |
"""
Bonito Model template
"""
class Model(Module):
"""
Model template for QuartzNet style architectures
https://arxiv.org/pdf/1910.10261.pdf
"""
def __init__(self, config):
super(Model, self).__init__()
if 'qscore' not in config:
self.qbias = 0.0
self.qsca... |
"""
Bonito basecall
"""
def basecall(model, reads, aligner=None, beamsize=5, chunksize=0, overlap=0, batchsize=1, qscores=False, reverse=None):
"""
Basecalls a set of reads.
"""
chunks = (
(read, chunk(torch.tensor(read.signal), chunksize, overlap)) for read in reads
)
scores = unbatc... |
# constants
FlashAttentionConfig = namedtuple('FlashAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
    """Return True iff `val` is not None."""
    if val is None:
        return False
    return True
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
... |
# helper functions
def exists(val):
    """Check that `val` carries a value, i.e. is not None."""
    return not (val is None)
# norm
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale ... |
class BlackHole(object):
    """Inert sink object: silently ignores attribute writes and returns
    itself from every attribute read and every call, so it can stand in
    for arbitrary objects."""
    def __call__(self, *args, **kwargs):
        return self
    def __getattr__(self, name):
        return self
    def __setattr__(self, name, value):
        pass  # drop every write on the floor
def seed_all(seed):
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
torch.cuda.manual_seed_a... |
NON_STANDARD_SUBSTITUTIONS = {
'2AS':'ASP', '3AH':'HIS', '5HP':'GLU', 'ACL':'ARG', 'AGM':'ARG', 'AIB':'ALA', 'ALM':'ALA', 'ALO':'THR', 'ALY':'LYS', 'ARM':'ARG',
'ASA':'ASP', 'ASB':'ASP', 'ASK':'ASP', 'ASL':'ASP', 'ASQ':'ASP', 'AYA':'ALA', 'BCS':'CYS', 'BHD':'ASP', 'BMT':'THR', 'BNN':'ALA',
'BUC':'CYS', 'B... |
class PaddingCollate(object):
def __init__(self, length_ref_key='mutation_mask', pad_values={'aa': 20, 'pos14': float('999'), 'icode': ' ', 'chain_id': '-'}, donot_pad={'foldx'}, eight=False):
super().__init__()
self.length_ref_key = length_ref_key
self.pad_values = pad_values
se... |
class ComplexEncoder(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.relpos_embedding = nn.Embedding(cfg.max_relpos*2+2, cfg.pair_feat_dim)
self.residue_encoder = PerResidueEncoder(cfg.node_feat_dim)
if cfg.geomattn is not None:
se... |
def _alpha_from_logits(logits, mask, inf=1e5):
"""
Args:
logits: Logit matrices, (N, L_i, L_j, num_heads).
mask: Masks, (N, L).
Returns:
alpha: Attention weights.
"""
N, L, _, _ = logits.size()
mask_row = mask.view(N, L, 1, 1).expand_as(logits) # (N, L, *, *)
... |
class PerResidueEncoder(nn.Module):
def __init__(self, feat_dim):
super().__init__()
self.aatype_embed = nn.Embedding(21, feat_dim)
self.torsion_embed = PositionalEncoding()
self.mlp = nn.Sequential(
nn.Linear(21*14*3 + feat_dim, feat_dim * 2), nn.ReLU(),
... |
def get_pos_CB(pos14, atom_mask):
"""
Args:
pos14: (N, L, 14, 3)
atom_mask: (N, L, 14)
"""
N, L = pos14.shape[:2]
mask_CB = atom_mask[:, :, ATOM_CB] # (N, L)
mask_CB = mask_CB[:, :, None].expand(N, L, 3)
pos_CA = pos14[:, :, ATOM_CA] # (N, L, 3)
pos_CB = pos14[:, ... |
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('wt_pdb', type=str)
parser.add_argument('mut_pdb', type=str)
parser.add_argument('--model', type=str, default='./data/model.pt')
parser.add_argument('--dev... |
AoA = AttentionOnAttention
|
def exists(val):
    """True unless `val` is None."""
    return not (val is None)
def default(val, d):
    """Return `val` when it exists, otherwise the fallback `d`."""
    return d if not exists(val) else val
class AttentionOnAttention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
dropout = 0.,
aoa_dropout = 0.
):
super().__init_... |
# helpers
def exists(val):
    """Predicate: `val` is not None."""
    result = val is not None
    return result
def batched_index_select(values, indices):
    """Gather rows of `values` at per-batch positions `indices`.

    Assumes values is (batch, n, dim) and indices is (batch, k); returns a
    (batch, k, dim) tensor — TODO confirm against callers.
    """
    feat_dim = values.shape[-1]
    expanded = indices[:, :, None].expand(-1, -1, feat_dim)
    return values.gather(1, expanded)
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self... |
logging.set_verbosity_error()
def exists(val):
    """Return whether `val` is set (not None)."""
    return False if val is None else True
def map_values(fn, dictionary):
    """Apply `fn` to every value of `dictionary`, keeping the keys unchanged."""
    return dict((k, fn(v)) for k, v in dictionary.items())
CONTEXT_EMBED_USE_CPU = os.getenv('CONTEXT_EMBED_USE_CPU', None) is not None
if CONTEXT_EMBED_USE_CPU:
print('calculating context embed only on cpu')
M... |
# helpers functions
def exists(x):
    """True when `x` is not None."""
    return not (x is None)
def default(val, d):
    """Return `val` if set; otherwise the fallback `d` (invoked first if callable)."""
    if not exists(val):
        return d() if callable(d) else d
    return val
def cycle(dl):
    """Yield items from iterable `dl` forever, restarting it on exhaustion."""
    while True:
        yield from dl
def has_int_squareroot(num):
    """Return True iff integer `num` is a perfect square.

    math.isqrt keeps the check in exact integer arithmetic; the previous
    float-based `math.sqrt(num) ** 2 == num` test gives false positives once
    `num` exceeds float precision (~2**53).
    """
    return math.isqrt(num) ** 2 == num
def... |
terminate = False  # flipped to True by the signal handler below so loops can exit
def signal_handling(signum,frame):
    """Signal handler: request a graceful stop by raising the module-level flag."""
    global terminate
    terminate = True
num_attempts = 4
for attempt in range(num_attempts):
dream = Imagine(
text = "an armchair in the form of pikachu\\an armchair imitating pikachu\\abstract",
text_min = "blur\\zoom",
lr = 7e-2,
... |
__version__ = '0.9.1'
|
"""Good differentiable image resampling for PyTorch."""
def sinc(x):
    """Normalized sinc: sin(pi*x)/(pi*x), with sinc(0) defined as 1."""
    px = math.pi * x
    return torch.where(x != 0, torch.sin(px) / px, x.new_ones([]))
def lanczos(x, a):
    """Lanczos window of order `a`, normalized to sum to 1 over |x| < a."""
    inside = torch.logical_and(-a < x, x < a)
    taps = torch.where(inside, sinc(x) * sinc(x / a), x.new_zeros([]))
    return taps / taps.sum()
... |
# Exponential Moving Average (from https://gist.github.com/crowsonkb/76b94d5238272722290734bf4725d204)
"""Exponential moving average for PyTorch. Adapted from
https://www.zijianhu.com/post/pytorch/ema/ by crowsonkb
"""
class EMA(nn.Module):
def __init__(self, model, decay):
super().__init__()
sel... |
# this code is a copy from huggingface
# with some minor modifications
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BIGGAN_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
... |
def train(
text=None,
img=None,
text_min="",
lr = .07,
image_size = 512,
gradient_accumulate_every = 1,
epochs = 20,
iterations = 1050,
save_every = 50,
overwrite = False,
save_progress = False,
save_date_time = False,
bilinear = False,
open_folder = True,
s... |
assert torch.cuda.is_available(), 'CUDA must be available in order to use Big Sleep'
# graceful keyboard interrupt
terminate = False  # set to True on interrupt so the main loop can exit cleanly
def signal_handling(signum,frame):
    """SIGINT handler: announce the keyboard interrupt and raise the shutdown flag."""
    print('detecting keyboard interrupt, gracefully exiting')
    global terminate
    terminate = True
signal.signal(signal.SIGINT,signal_handl... |
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipub... |
class AxialPositionalEmbedding(nn.Module):
def __init__(self, dim, axial_shape, axial_dims = None):
super().__init__()
self.dim = dim
self.shape = axial_shape
self.max_seq_len = reduce(mul, axial_shape, 1)
self.summed = axial_dims is None
axial_dims = ((dim,) * len... |
# constants
DEVICE = None # defaults to cuda if available, else cpu
NUM_BATCHES = int(1e5)
GRADIENT_ACCUMULATE_EVERY = 16
LEARNING_RATE = 3e-4
IGNORE_INDEX = -100
THRESHOLD_LENGTH = 250
# set device
DISTOGRAM_BUCKETS = constants.DISTOGRAM_BUCKETS
DEVICE = constants.DEVICE
# helpers
def cycle(loader, cond = lambd... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.