"""Minimal inference example for the AMPLIFY protein language model.

Loads the GleghornLab/AMPLIFY_120M checkpoint, tokenizes a batch of protein
sequences, and runs a single forward pass that returns hidden states and
attention maps.
"""
import os

import torch
from transformers import EsmTokenizer  # AMPLIFY reuses the ESM tokenizer

from udev.models.amplify.modeling_amplify import AMPLIFY

# Hugging Face access token for the (possibly gated) checkpoint.
# The original snippet referenced `token` without ever defining it, which
# raised NameError — read it from the environment so the script runs as-is.
token = os.environ.get("HF_TOKEN")

device = torch.device("cuda")  # only CUDA is supported by this model
model = AMPLIFY.from_pretrained("GleghornLab/AMPLIFY_120M", token=token).to(device)
model.eval()  # inference only — disable dropout etc.
tokenizer = EsmTokenizer.from_pretrained("GleghornLab/AMPLIFY_120M", token=token)

sequences = ["SEQWENCE", "MEAEGAVE"]  # list of protein sequences (str)
# pad_to_multiple_of=8 keeps padded lengths tensor-core friendly
tokens = tokenizer(sequences, return_tensors="pt", padding=True, pad_to_multiple_of=8)
tokens = {k: v.to(device) for k, v in tokens.items()}

with torch.no_grad():  # no gradients needed for a forward-only example
    out = model(
        src=tokens["input_ids"],
        pad_mask=tokens["attention_mask"].float(),
        output_hidden_states=True,
        output_attentions=True,
    )
# --- Model-card metadata (copied from the Hugging Face page, kept for reference) ---
# Downloads last month: 12
# Safetensors — model size: 118M params; tensor type: F32
# Inference Providers (NEW): this model is not currently available via any of
# the supported Inference Providers. It cannot be deployed to the HF Inference
# API because the model has no pipeline_tag.