{ "_name_or_path": "CAMeL-Lab/bert-base-arabic-camelbert-msa", "architectures": [ "BertForSequenceClassification" ], "attention_probs_dropout_prob": 0.1, "classifier_dropout": null, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "id2label": { "0": "OM - \u0639\u0645\u0627\u0646", "1": "SD - \u0627\u0644\u0633\u0648\u062f\u0627\u0646", "2": "SA - \u0627\u0644\u0633\u0639\u0648\u062f\u064a\u0629", "3": "KW - \u0627\u0644\u0643\u0648\u064a\u062a", "4": "QA - \u0642\u0637\u0631", "5": "LY - \u0644\u064a\u0628\u064a\u0627", "6": "JO - \u0627\u0644\u0623\u0631\u062f\u0646", "7": "SY - \u0633\u0648\u0631\u064a\u0627", "8": "IQ - \u0627\u0644\u0639\u0631\u0627\u0642", "9": "MA - \u0627\u0644\u0645\u063a\u0631\u0628", "10": "EG - \u0645\u0635\u0631", "11": "PL - \u0641\u0644\u0633\u0637\u064a\u0646", "12": "YE - \u0627\u0644\u064a\u0645\u0646", "13": "BH - \u0627\u0644\u0628\u062d\u0631\u064a\u0646", "14": "DZ - \u0627\u0644\u062c\u0632\u0627\u0626\u0631", "15": "AE - \u0627\u0644\u0625\u0645\u0627\u0631\u0627\u062a", "16": "TN - \u062a\u0648\u0646\u0633", "17": "LB - \u0644\u0628\u0646\u0627\u0646" }, "initializer_range": 0.02, "intermediate_size": 3072, "label2id": { "LABEL_0": 0, "LABEL_1": 1, "LABEL_10": 10, "LABEL_11": 11, "LABEL_12": 12, "LABEL_13": 13, "LABEL_14": 14, "LABEL_15": 15, "LABEL_16": 16, "LABEL_17": 17, "LABEL_2": 2, "LABEL_3": 3, "LABEL_4": 4, "LABEL_5": 5, "LABEL_6": 6, "LABEL_7": 7, "LABEL_8": 8, "LABEL_9": 9 }, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", "transformers_version": "4.20.1", "type_vocab_size": 2, "use_cache": true, "vocab_size": 30000 }