{ "_name_or_path": "AutoTrain", "_num_labels": 35, "architectures": [ "BertForSequenceClassification" ], "attention_probs_dropout_prob": 0.1, "classifier_dropout": null, "directionality": "bidi", "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "id2label": { "0": "\u0621", "1": "\u0624", "2": "\u0627", "3": "\u0628", "4": "\u062a", "5": "\u062b", "6": "\u062c", "7": "\u062d", "8": "\u062e", "9": "\u062f", "10": "\u0630", "11": "\u0631", "12": "\u0632", "13": "\u0633", "14": "\u0634", "15": "\u0635", "16": "\u0636", "17": "\u0637", "18": "\u0637\u0646", "19": "\u0638", "20": "\u0639", "21": "\u063a", "22": "\u0641", "23": "\u0642", "24": "\u0643", "25": "\u0644", "26": "\u0644\u0627", "27": "\u0645", "28": "\u0646", "29": "\u0647", "30": "\u0647\u0640", "31": "\u0647\u0646", "32": "\u0648", "33": "\u0649", "34": "\u064a" }, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": { "\u0621": 0, "\u0624": 1, "\u0627": 2, "\u0628": 3, "\u062a": 4, "\u062b": 5, "\u062c": 6, "\u062d": 7, "\u062e": 8, "\u062f": 9, "\u0630": 10, "\u0631": 11, "\u0632": 12, "\u0633": 13, "\u0634": 14, "\u0635": 15, "\u0636": 16, "\u0637": 17, "\u0637\u0646": 18, "\u0638": 19, "\u0639": 20, "\u063a": 21, "\u0641": 22, "\u0642": 23, "\u0643": 24, "\u0644": 25, "\u0644\u0627": 26, "\u0645": 27, "\u0646": 28, "\u0647": 29, "\u0647\u0640": 30, "\u0647\u0646": 31, "\u0648": 32, "\u0649": 33, "\u064a": 34 }, "layer_norm_eps": 1e-12, "max_length": 64, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "padding": "max_length", "pooler_fc_size": 768, "pooler_num_attention_heads": 12, "pooler_num_fc_layers": 3, "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", "transformers_version": "4.15.0", "type_vocab_size": 2, "use_cache": true, "vocab_size": 50000 }