{ "_name_or_path": "AutoTrain", "architectures": [ "ViTForImageClassification" ], "attention_probs_dropout_prob": 0.0, "encoder_stride": 16, "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_size": 768, "id2label": { "0": "\ub0b4\ucd94\ub7f4", "1": "\ub7ec\ube14\ub9ac\ub85c\ub9e8\ud2f1", "10": "\ud55c\uad6d\uc544\uc2dc\uc544", "2": "\ubaa8\ub358", "3": "\ubbf8\ub2c8\uba40\uc2ec\ud50c", "4": "\ubd81\uc720\ub7fd", "5": "\ube48\ud2f0\uc9c0\ub808\ud2b8\ub85c", "6": "\uc720\ub2c8\ud06c", "7": "\uc778\ub354\uc2a4\ud2b8\ub9ac\uc5bc", "8": "\ud074\ub798\uc2dd\uc564\ud2f1", "9": "\ud504\ub80c\uce58\ud504\ub85c\ubc29\uc2a4" }, "image_size": 224, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": { "\ub0b4\ucd94\ub7f4": "0", "\ub7ec\ube14\ub9ac\ub85c\ub9e8\ud2f1": "1", "\ubaa8\ub358": "2", "\ubbf8\ub2c8\uba40\uc2ec\ud50c": "3", "\ubd81\uc720\ub7fd": "4", "\ube48\ud2f0\uc9c0\ub808\ud2b8\ub85c": "5", "\uc720\ub2c8\ud06c": "6", "\uc778\ub354\uc2a4\ud2b8\ub9ac\uc5bc": "7", "\ud074\ub798\uc2dd\uc564\ud2f1": "8", "\ud504\ub80c\uce58\ud504\ub85c\ubc29\uc2a4": "9", "\ud55c\uad6d\uc544\uc2dc\uc544": "10" }, "layer_norm_eps": 1e-12, "max_length": 128, "model_type": "vit", "num_attention_heads": 12, "num_channels": 3, "num_hidden_layers": 12, "padding": "max_length", "patch_size": 16, "problem_type": "single_label_classification", "qkv_bias": true, "torch_dtype": "float32", "transformers_version": "4.29.2" }