#!/usr/bin/env python3
from transformers import CLIPTextConfig, CLIPModel
import torch
# Take the text config from the "penultimate" variant so the text encoder matches
# that checkpoint's settings, then load the LAION DataComp CLIP weights with it.
config = CLIPTextConfig.from_pretrained("openMUSE/CLIP-ViT-L-14-DataComp.XL-s13B-b90K-penultimate")
model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K", text_config=config)

# Swap in BetterTransformer fused attention kernels (requires the optimum package),
# then compile only the text encoder with torch.compile for additional speed.
model.to_bettertransformer()
text_encoder = model.text_model
text_encoder = torch.compile(text_encoder)
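
# Minimal usage sketch, assuming the LAION repo also ships a matching tokenizer
# and that the compiled text encoder is only used for inference.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K")
inputs = tokenizer(["a photo of a cat"], padding=True, return_tensors="pt")
with torch.no_grad():
    outputs = text_encoder(**inputs)      # the first call triggers compilation
features = outputs.last_hidden_state      # per-token hidden states from the text encoder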