{
  "architectures": [
    "ViTForImageClassification"
  ],
  "attention_probs_dropout_prob": 0.0,
  "encoder_stride": 16,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "id2label": {
    "0": "call",
    "1": "dislike",
    "2": "fist",
    "3": "four",
    "4": "like",
    "5": "mute",
    "6": "ok",
    "7": "one",
    "8": "palm",
    "9": "peace",
    "10": "peace_inverted",
    "11": "rock",
    "12": "stop",
    "13": "stop_inverted",
    "14": "three",
    "15": "three2",
    "16": "two_up",
    "17": "two_up_inverted"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "call": 0,
    "dislike": 1,
    "fist": 2,
    "four": 3,
    "like": 4,
    "mute": 5,
    "ok": 6,
    "one": 7,
    "palm": 8,
    "peace": 9,
    "peace_inverted": 10,
    "rock": 11,
    "stop": 12,
    "stop_inverted": 13,
    "three": 14,
    "three2": 15,
    "two_up": 16,
    "two_up_inverted": 17
  },
  "layer_norm_eps": 1e-12,
  "model_type": "vit",
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "patch_size": 16,
  "problem_type": "single_label_classification",
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.41.0"
}