ERROR:root:Model config for open_clip_pytorch_model.bin not found;

#7 by RichardBrownLu - opened

[attached screenshot: model_catalog.png]

/data/transfer1_lyq/data_filter_test/data_filter_network$ python3 test_model_demo_01.py

ERROR:root:Model config for open_clip_pytorch_model.bin not found; available models ['coca_base', 'coca_roberta-ViT-B-32', 'coca_ViT-B-32', 'coca_ViT-L-14', 'convnext_base', 'convnext_base_w', 'convnext_base_w_320', 'convnext_large', 'convnext_large_d', 'convnext_large_d_320', 'convnext_small', 'convnext_tiny', 'convnext_xlarge', 'convnext_xxlarge', 'convnext_xxlarge_320', 'EVA01-g-14', 'EVA01-g-14-plus', 'EVA02-B-16', 'EVA02-E-14', 'EVA02-E-14-plus', 'EVA02-L-14', 'EVA02-L-14-336', 'mt5-base-ViT-B-32', 'mt5-xl-ViT-H-14', 'nllb-clip-base', 'nllb-clip-base-siglip', 'nllb-clip-large', 'nllb-clip-large-siglip', 'RN50', 'RN50-quickgelu', 'RN50x4', 'RN50x16', 'RN50x64', 'RN101', 'RN101-quickgelu', 'roberta-ViT-B-32', 'swin_base_patch4_window7_224', 'ViT-B-16', 'ViT-B-16-plus', 'ViT-B-16-plus-240', 'ViT-B-16-quickgelu', 'ViT-B-16-SigLIP', 'ViT-B-16-SigLIP-256', 'ViT-B-16-SigLIP-384', 'ViT-B-16-SigLIP-512', 'ViT-B-16-SigLIP-i18n-256', 'ViT-B-32', 'ViT-B-32-256', 'ViT-B-32-plus-256', 'ViT-B-32-quickgelu', 'ViT-bigG-14', 'ViT-bigG-14-CLIPA', 'ViT-bigG-14-CLIPA-336', 'ViT-e-14', 'ViT-g-14', 'ViT-H-14', 'ViT-H-14-378-quickgelu', 'ViT-H-14-CLIPA', 'ViT-H-14-CLIPA-336', 'ViT-H-14-quickgelu', 'ViT-H-16', 'ViT-L-14', 'ViT-L-14-280', 'ViT-L-14-336', 'ViT-L-14-CLIPA', 'ViT-L-14-CLIPA-336', 'ViT-L-14-quickgelu', 'ViT-L-16', 'ViT-L-16-320', 'ViT-L-16-SigLIP-256', 'ViT-L-16-SigLIP-384', 'ViT-M-16', 'ViT-M-16-alt', 'ViT-M-32', 'ViT-M-32-alt', 'ViT-S-16', 'ViT-S-16-alt', 'ViT-S-32', 'ViT-S-32-alt', 'ViT-SO400M-14-SigLIP', 'ViT-SO400M-14-SigLIP-384', 'vit_medium_patch16_gap_256', 'vit_relpos_medium_patch16_cls_224', 'xlm-roberta-base-ViT-B-32', 'xlm-roberta-large-ViT-H-14'].
Traceback (most recent call last):
  File "test_model_demo_01.py", line 9, in <module>
    model, preprocess = create_model_from_pretrained('open_clip_pytorch_model.bin')
  File "/data/miniconda3/envs/dfn_env/lib/python3.7/site-packages/open_clip/factory.py", line 449, in create_model_from_pretrained
    **model_kwargs,
  File "/data/miniconda3/envs/dfn_env/lib/python3.7/site-packages/open_clip/factory.py", line 215, in create_model
    raise RuntimeError(f'Model config for {model_name} not found.')
RuntimeError: Model config for open_clip_pytorch_model.bin not found.
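The traceback shows the root cause: create_model_from_pretrained treats its first argument as an open_clip model name and looks it up in the registry printed above (the same list open_clip.list_models() returns), so a checkpoint filename like open_clip_pytorch_model.bin can never match. A minimal sketch of loading local weights instead, assuming the checkpoint is a ViT-H-14 (the architecture the tokenizer below expects) and reusing this thread's paths:

import open_clip
from open_clip import create_model_from_pretrained

print(open_clip.list_models())  # the registry the error message was echoing

# model_name selects the architecture config; `pretrained` points at the
# local weights file (the exact path is assumed from this thread).
model, preprocess = create_model_from_pretrained(
    'ViT-H-14',
    pretrained='/data/transfer1_lyq/data_filter_test/data_filter_network/open_clip_pytorch_model.bin',
)

The full test script that produced the error, for reference: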

import torch
import torch.nn.functional as F
from urllib.request import urlopen
from PIL import Image
from open_clip import create_model_from_pretrained, get_tokenizer

model, preprocess = create_model_from_pretrained('/data/transfer1_lyq/data_filter_test/data_filter_network')
tokenizer = get_tokenizer('ViT-H-14')

image = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))
image = preprocess(image).unsqueeze(0)

labels_list = ["a dog", "a cat", "a donut", "a beignet"]
text = tokenizer(labels_list, context_length=model.context_length)

with torch.no_grad(), torch.cuda.amp.autocast():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    image_features = F.normalize(image_features, dim=-1)
    text_features = F.normalize(text_features, dim=-1)

    text_probs = torch.sigmoid(image_features @ text_features.T * model.logit_scale.exp() + model.logit_bias)

zipped_list = list(zip(labels_list, [round(p.item(), 3) for p in text_probs[0]]))
print("Label probabilities: ", zipped_list)
