{
  "_name_or_path": "openai/clip-vit-base-patch32",
  "architectures": [
    "CLIPForImageClassification"
  ],
  "id2label": {
    "0": "airplane",
    "1": "automobile",
    "2": "bird",
    "3": "cat",
    "4": "deer",
    "5": "dog",
    "6": "frog",
    "7": "horse",
    "8": "ship",
    "9": "truck"
  },
  "initializer_factor": 1.0,
  "label2id": {
    "airplane": 0,
    "automobile": 1,
    "bird": 2,
    "cat": 3,
    "deer": 4,
    "dog": 5,
    "frog": 6,
    "horse": 7,
    "ship": 8,
    "truck": 9
  },
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "problem_type": "single_label_classification",
  "projection_dim": 512,
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "model_type": "clip_text_model"
  },
  "torch_dtype": "float32",
  "transformers_version": "4.38.2",
  "vision_config": {
    "dropout": 0.0,
    "model_type": "clip_vision_model"
  }
}