{ "_name_or_path": "google/vit-base-patch16-224", "architectures": [ "ViTForImageClassification" ], "attention_probs_dropout_prob": 0.0, "encoder_stride": 16, "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_size": 768, "id2label": { "0": "Beige", "1": "Black", "2": "Black and White", "3": "Blue", "4": "Brown", "5": "Deep Beige", "6": "Deep Brown", "7": "Deep Gray", "8": "Gray", "9": "Green", "10": "Ivory", "11": "Light Beige", "12": "Light Brown", "13": "Light Gray", "14": "Orange", "15": "Pink", "16": "Purple", "17": "Red", "18": "White", "19": "Yellow" }, "image_size": 224, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": { "Beige": 0, "Black": 1, "Black and White": 2, "Blue": 3, "Brown": 4, "Deep Beige": 5, "Deep Brown": 6, "Deep Gray": 7, "Gray": 8, "Green": 9, "Ivory": 10, "Light Beige": 11, "Light Brown": 12, "Light Gray": 13, "Orange": 14, "Pink": 15, "Purple": 16, "Red": 17, "White": 18, "Yellow": 19 }, "layer_norm_eps": 1e-12, "model_type": "vit", "num_attention_heads": 12, "num_channels": 3, "num_hidden_layers": 12, "patch_size": 16, "problem_type": "single_label_classification", "qkv_bias": true, "torch_dtype": "float32", "transformers_version": "4.37.0" }