{ "_name_or_path": "mattmdjaga/segformer_b2_clothes", "architectures": [ "SegformerForSemanticSegmentation" ], "attention_probs_dropout_prob": 0.0, "classifier_dropout_prob": 0.1, "decoder_hidden_size": 768, "depths": [ 3, 4, 6, 3 ], "downsampling_rates": [ 1, 4, 8, 16 ], "drop_path_rate": 0.1, "hidden_act": "gelu", "hidden_dropout_prob": 0.0, "hidden_sizes": [ 64, 128, 320, 512 ], "id2label": { "0": "background", "1": "upper_torso", "2": "left_pants", "3": "right_pants", "4": "skirts", "5": "left_sleeve", "6": "right_sleeve", "7": "outer_collar", "8": "inner_collar" }, "image_size": 224, "initializer_range": 0.02, "label2id": { "background": 0, "inner_collar": 8, "left_pants": 2, "left_sleeve": 5, "outer_collar": 7, "right_pants": 3, "right_sleeve": 6, "skirts": 4, "upper_torso": 1 }, "layer_norm_eps": 1e-06, "mlp_ratios": [ 4, 4, 4, 4 ], "model_type": "segformer", "num_attention_heads": [ 1, 2, 5, 8 ], "num_channels": 3, "num_encoder_blocks": 4, "patch_sizes": [ 7, 3, 3, 3 ], "reshape_last_stage": true, "semantic_loss_ignore_index": 255, "sr_ratios": [ 8, 4, 2, 1 ], "strides": [ 4, 2, 2, 2 ], "torch_dtype": "float32", "transformers_version": "4.35.2" }