train17 / config.json
Last commit by shubhamWi91: "Training in progress, epoch 1" (a3a9237)
{
  "_name_or_path": "jozhang97/deta-swin-large-o365",
  "activation_dropout": 0.0,
  "activation_function": "relu",
  "architectures": [
    "DetaForObjectDetection"
  ],
  "assign_first_stage": true,
  "attention_dropout": 0.0,
  "auxiliary_loss": false,
  "backbone_config": {
    "attention_probs_dropout_prob": 0.0,
    "depths": [
      2,
      2,
      18,
      2
    ],
    "drop_path_rate": 0.1,
    "embed_dim": 192,
    "encoder_stride": 32,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.0,
    "hidden_size": 1536,
    "image_size": 224,
    "initializer_range": 0.02,
    "layer_norm_eps": 1e-05,
    "mlp_ratio": 4.0,
    "model_type": "swin",
    "num_heads": [
      6,
      12,
      24,
      48
    ],
    "num_layers": 4,
    "out_features": [
      "stage2",
      "stage3",
      "stage4"
    ],
    "out_indices": [
      2,
      3,
      4
    ],
    "patch_size": 4,
    "path_norm": true,
    "qkv_bias": true,
    "use_absolute_embeddings": false,
    "window_size": 12
  },
  "bbox_cost": 5,
  "bbox_loss_coefficient": 5,
  "class_cost": 1,
  "d_model": 256,
  "decoder_attention_heads": 8,
  "decoder_ffn_dim": 2048,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "decoder_n_points": 4,
  "dice_loss_coefficient": 1,
  "dropout": 0.1,
  "encoder_attention_heads": 8,
  "encoder_ffn_dim": 2048,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 6,
  "encoder_n_points": 4,
  "eos_coefficient": 0.1,
  "focal_alpha": 0.25,
  "giou_cost": 2,
  "giou_loss_coefficient": 2,
  "id2label": {
    "0": "pet_bottle",
    "1": "hm_ldpe",
    "2": "pp_w",
    "3": "ldpe_wrapper",
    "4": "hdpe_bottle",
    "5": "paper",
    "6": "pp",
    "7": "aluminium_foil",
    "8": "multilayer_plastic",
    "9": "ps",
    "10": "cardboard",
    "11": "blister_pack",
    "12": "aluminium_can",
    "13": "tetrapack",
    "14": "others"
  },
  "init_std": 0.02,
  "init_xavier_std": 1.0,
  "is_encoder_decoder": true,
  "label2id": {
    "aluminium_can": 12,
    "aluminium_foil": 7,
    "blister_pack": 11,
    "cardboard": 10,
    "hdpe_bottle": 4,
    "hm_ldpe": 1,
    "ldpe_wrapper": 3,
    "multilayer_plastic": 8,
    "others": 14,
    "paper": 5,
    "pet_bottle": 0,
    "pp": 6,
    "pp_w": 2,
    "ps": 9,
    "tetrapack": 13
  },
  "mask_loss_coefficient": 1,
  "max_position_embeddings": 2048,
  "model_type": "deta",
  "num_feature_levels": 5,
  "num_queries": 900,
  "position_embedding_type": "sine",
  "torch_dtype": "float32",
  "transformers_version": "4.32.1",
  "two_stage": true,
  "two_stage_num_proposals": 300,
  "with_box_refine": true
}
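
For context, a config like this is what transformers writes out when the jozhang97/deta-swin-large-o365 checkpoint named in "_name_or_path" is reloaded with a custom label set for fine-tuning. The Python sketch below shows only that step and is an assumption about the author's setup (the actual training script, dataset, and Trainer arguments are not part of this file); it requires a transformers release that ships DETA (4.27 or later; this config was saved with 4.32.1).

from transformers import AutoImageProcessor, DetaForObjectDetection

# The 15 waste-sorting classes from id2label above, in index order.
labels = [
    "pet_bottle", "hm_ldpe", "pp_w", "ldpe_wrapper", "hdpe_bottle",
    "paper", "pp", "aluminium_foil", "multilayer_plastic", "ps",
    "cardboard", "blister_pack", "aluminium_can", "tetrapack", "others",
]
id2label = {i: name for i, name in enumerate(labels)}
label2id = {name: i for i, name in id2label.items()}

# Image processor from the base repo (assumed to ship a preprocessor config).
processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large-o365")

# Reload the base checkpoint with the new label maps. The pretrained
# classification head targets the Objects365 label set, so it is
# re-initialized for 15 classes (hence ignore_mismatched_sizes=True).
model = DetaForObjectDetection.from_pretrained(
    "jozhang97/deta-swin-large-o365",
    id2label=id2label,
    label2id=label2id,
    ignore_mismatched_sizes=True,
)

# model.config now carries the label maps seen in this file; everything not
# overridden here (two_stage=True, num_queries=900, num_feature_levels=5, ...)
# is inherited from the base checkpoint's configuration.
model.config.save_pretrained("train17")  # writes train17/config.json

Saving or pushing the model during training (the "Training in progress, epoch 1" commit message is the kind the Trainer's hub integration generates automatically) is what places this config.json alongside the checkpoint weights.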