{ "_name_or_path": "AutoTrain", "_num_labels": 8, "architectures": [ "BertForSequenceClassification" ], "attention_probs_dropout_prob": 0.1, "classifier_dropout": null, "directionality": "bidi", "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 1024, "id2label": { "0": "Component", "1": "Do example", "2": "Don't example", "3": "Educative", "4": "Rationale", "5": "States", "6": "Variant rationale", "7": "Variants" }, "initializer_range": 0.02, "intermediate_size": 4096, "label2id": { "Component": 0, "Do example": 1, "Don't example": 2, "Educative": 3, "Rationale": 4, "States": 5, "Variant rationale": 6, "Variants": 7 }, "layer_norm_eps": 1e-12, "max_length": 96, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 16, "num_hidden_layers": 24, "pad_token_id": 0, "padding": "max_length", "pooler_fc_size": 768, "pooler_num_attention_heads": 12, "pooler_num_fc_layers": 3, "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", "transformers_version": "4.29.2", "type_vocab_size": 2, "use_cache": true, "vocab_size": 28996 }