{
"_name_or_path": "NehaBardeDUKE/autotrain-ai-generated-image-classification-3250490787",
"architectures": [
"SwinForImageClassification"
],
"attention_probs_dropout_prob": 0.0,
"depths": [
2,
2,
6,
2
],
"drop_path_rate": 0.1,
"embed_dim": 96,
"encoder_stride": 32,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.0,
"hidden_size": 768,
"id2label": {
"0": "Artificial",
"1": "Human"
},
"image_size": 224,
"initializer_range": 0.02,
"label2id": {
"Artificial": "0",
"Human": "1"
},
"layer_norm_eps": 1e-05,
"max_length": 128,
"mlp_ratio": 4.0,
"model_type": "swin",
"num_channels": 3,
"num_heads": [
3,
6,
12,
24
],
"num_layers": 4,
"out_features": [
"stage4"
],
"out_indices": [
4
],
"padding": "max_length",
"patch_size": 4,
"path_norm": true,
"problem_type": "single_label_classification",
"qkv_bias": true,
"stage_names": [
"stem",
"stage1",
"stage2",
"stage3",
"stage4"
],
"torch_dtype": "float32",
"transformers_version": "4.41.1",
"use_absolute_embeddings": false,
"window_size": 7
}