{
  "_name_or_path": "openai/clip-vit-large-patch14",
  "architectures": [
    "CLIPForImageClassification"
  ],
  "id2label": {
    "0": "T-shirt/top",
    "1": "Trouser",
    "2": "Pullover",
    "3": "Dress",
    "4": "Coat",
    "5": "Sandal",
    "6": "Shirt",
    "7": "Sneaker",
    "8": "Bag",
    "9": "Ankle boot"
  },
  "initializer_factor": 1.0,
  "label2id": {
    "Ankle boot": "9",
    "Bag": "8",
    "Coat": "4",
    "Dress": "3",
    "Pullover": "2",
    "Sandal": "5",
    "Shirt": "6",
    "Sneaker": "7",
    "T-shirt/top": "0",
    "Trouser": "1"
  },
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "problem_type": "single_label_classification",
  "projection_dim": 768,
  "text_config": {
    "dropout": 0.0,
    "hidden_size": 768,
    "intermediate_size": 3072,
    "model_type": "clip_text_model",
    "num_attention_heads": 12,
    "projection_dim": 768
  },
  "torch_dtype": "float32",
  "transformers_version": "4.40.1",
  "vision_config": {
    "dropout": 0.0,
    "hidden_size": 1024,
    "intermediate_size": 4096,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_hidden_layers": 24,
    "patch_size": 14,
    "projection_dim": 768
  }
}
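
For reference, below is a minimal inference sketch showing how a checkpoint saved with this config is typically loaded via `transformers`. The local directory name and example image path are assumptions for illustration, not part of the original config; the class names (`AutoImageProcessor`, `AutoModelForImageClassification`) resolve to the CLIP image-classification head declared under `architectures`.

```python
# Minimal usage sketch, assuming the config above sits next to fine-tuned weights
# in a local directory (hypothetical path "./clip-fashion-classifier").
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

model_dir = "./clip-fashion-classifier"  # hypothetical checkpoint directory

# Preprocessing follows the base openai/clip-vit-large-patch14 settings
# (224x224 inputs split into 14x14 patches, per "patch_size": 14 above).
processor = AutoImageProcessor.from_pretrained(model_dir)
model = AutoModelForImageClassification.from_pretrained(model_dir)

image = Image.open("example.png").convert("RGB")  # any garment photo
inputs = processor(images=image, return_tensors="pt")

outputs = model(**inputs)
predicted_id = outputs.logits.argmax(-1).item()
print(model.config.id2label[str(predicted_id)])  # e.g. "Sneaker"
```

Because `problem_type` is `"single_label_classification"`, passing a `labels` tensor to the forward call would make the model compute a standard cross-entropy loss over the ten classes in `id2label`.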