{ "_name_or_path": "openai/clip-vit-large-patch14", "architectures": [ "CLIPForImageClassification" ], "id2label": { "0": "Erythromelal", "1": "Guttate", "2": "Inverse", "3": "Nail", "4": "Normal", "5": "Plaque", "6": "Psoriatic Arthritis", "7": "Pustular" }, "initializer_factor": 1.0, "label2id": { "Erythromelal": 0, "Guttate": 1, "Inverse": 2, "Nail": 3, "Normal": 4, "Plaque": 5, "Psoriatic Arthritis": 6, "Pustular": 7 }, "logit_scale_init_value": 2.6592, "model_type": "clip", "problem_type": "single_label_classification", "projection_dim": 768, "text_config": { "dropout": 0.0, "hidden_size": 768, "intermediate_size": 3072, "model_type": "clip_text_model", "num_attention_heads": 12, "projection_dim": 768 }, "torch_dtype": "float32", "transformers_version": "4.39.3", "vision_config": { "dropout": 0.0, "hidden_size": 1024, "intermediate_size": 4096, "model_type": "clip_vision_model", "num_attention_heads": 16, "num_hidden_layers": 24, "patch_size": 14, "projection_dim": 768 } }