bakrianoo commited on
Commit
7c71c1b
1 Parent(s): 459635e

upload the first version

Browse files
README.md ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language: ar
3
+ datasets:
4
+ - Marefa-NER
5
+ ---
6
+
7
+ # Marefa NER نموذج المعرفة لتصنيف أجزاء النص
8
+
9
+ ## Model description
10
+
11
+ **Marefa-NER** is a large Arabic NER model, built on a completely new dataset, which targets extracting up to 9 different types of entities
12
+ ```
13
+ Person, Location, Organization, Nationality, Job, Product, Event, Time, Art-Work
14
+ ```
15
+
16
+ نموذج المعرفة لتصنيف أجزاء النص. نموذج جديد كليا من حيث البيانات المستخدمة في تدريب النموذج.
17
+ كذلك يستهدف النموذج تصنيف حتى 9 أنواع مختلفة من أجزاء النص
18
+ ```
19
+ شخص - مكان - منظمة - جنسية - وظيفة - منتج - حدث - توقيت - عمل إبداعي
20
+ ```
21
+
22
+ ## How to use كيف تستخدم النموذج
23
+
24
+ Install transformers
25
+
26
+ `$ pip3 install transformers==4.3.0`
27
+
28
+ > If you are using `Google Colab`, please restart your runtime after installing the packages.
29
+
30
+ -----------
31
+
32
+ ```python
33
+ from transformers import AutoTokenizer, AutoModelForTokenClassification
34
+ from transformers import pipeline
35
+
36
+ # ===== import the model
37
+ m_name = "marefa-nlp/marefa-ner"
38
+ tokenizer = AutoTokenizer.from_pretrained(m_name)
39
+ model = AutoModelForTokenClassification.from_pretrained(m_name)
40
+
41
+ # ===== build the NER pipeline
42
+ nlp = pipeline("ner", model=model, tokenizer=tokenizer, grouped_entities=True)
43
+
44
+ # ===== extract the entities from a sample text
45
+ example = 'قاد عمر المختار القوات في ليبيا ضد الجيش الإيطالي'
46
+ ner_results = nlp(example)
47
+
48
+ # ===== print the ner_results
49
+ for ent in ner_results:
50
+ print(ent["word"], '->' ,ent['entity_group'], " # score:", "%.2f" % ent['score'])
51
+
52
+ #####
53
+ # عمر المختار -> person # score: 1.00
54
+ # ليبيا -> location # score: 0.99
55
+ # الجيش الإيطالي -> organization # score: 0.99
56
+ ####
57
+
58
+ ```
config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "marefa-ner",
3
+ "architectures": [
4
+ "XLMRobertaForTokenClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 0,
8
+ "eos_token_id": 2,
9
+ "gradient_checkpointing": false,
10
+ "hidden_act": "gelu",
11
+ "hidden_dropout_prob": 0.1,
12
+ "hidden_size": 1024,
13
+ "id2label": {
14
+ "0": "O", "1": "B-job", "2": "I-job", "3": "B-nationality", "4": "B-person", "5": "I-person", "6": "B-location", "7": "B-time", "8": "I-time", "9": "B-event", "10": "I-event", "11": "B-organization", "12": "I-organization", "13": "I-location", "14": "I-nationality", "15": "B-product", "16": "I-product", "17": "B-artwork", "18": "I-artwork"
15
+ },
16
+ "initializer_range": 0.02,
17
+ "intermediate_size": 4096,
18
+ "label2id": {
19
+ "O": 0, "B-job": 1, "I-job": 2, "B-nationality": 3, "B-person": 4, "I-person": 5, "B-location": 6, "B-time": 7, "I-time": 8, "B-event": 9, "I-event": 10, "B-organization": 11, "I-organization": 12, "I-location": 13, "I-nationality": 14, "B-product": 15, "I-product": 16, "B-artwork": 17, "I-artwork": 18
20
+ },
21
+ "layer_norm_eps": 1e-05,
22
+ "max_position_embeddings": 514,
23
+ "model_type": "xlm-roberta",
24
+ "num_attention_heads": 16,
25
+ "num_hidden_layers": 24,
26
+ "output_past": true,
27
+ "pad_token_id": 1,
28
+ "position_embedding_type": "absolute",
29
+ "transformers_version": "4.3.2",
30
+ "type_vocab_size": 1,
31
+ "use_cache": true,
32
+ "vocab_size": 250002
33
+ }
model_args.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"adam_epsilon": 1e-08, "best_model_dir": "outputs/best_model", "cache_dir": "cache_dir/", "config": {}, "cosine_schedule_num_cycles": 0.5, "custom_layer_parameters": [], "custom_parameter_groups": [], "dataloader_num_workers": 0, "do_lower_case": false, "dynamic_quantize": false, "early_stopping_consider_epochs": false, "early_stopping_delta": 0, "early_stopping_metric": "eval_loss", "early_stopping_metric_minimize": true, "early_stopping_patience": 3, "encoding": null, "adafactor_eps": [1e-30, 0.001], "adafactor_clip_threshold": 1.0, "adafactor_decay_rate": -0.8, "adafactor_beta1": null, "adafactor_scale_parameter": true, "adafactor_relative_step": true, "adafactor_warmup_init": true, "eval_batch_size": 8, "evaluate_during_training": false, "evaluate_during_training_silent": true, "evaluate_during_training_steps": 2000, "evaluate_during_training_verbose": false, "evaluate_each_epoch": true, "fp16": true, "gradient_accumulation_steps": 1, "learning_rate": 2e-05, "local_rank": -1, "logging_steps": 50, "manual_seed": null, "max_grad_norm": 1.0, "max_seq_length": 128, "model_name": "xlm-roberta-large", "model_type": "xlmroberta", "multiprocessing_chunksize": 500, "n_gpu": 1, "no_cache": false, "no_save": false, "not_saved_args": [], "num_train_epochs": 10, "optimizer": "AdamW", "output_dir": "outputs/", "overwrite_output_dir": true, "process_count": 1, "polynomial_decay_schedule_lr_end": 1e-07, "polynomial_decay_schedule_power": 1.0, "quantized_model": false, "reprocess_input_data": true, "save_best_model": true, "save_eval_checkpoints": true, "save_model_every_epoch": true, "save_optimizer_and_scheduler": true, "save_steps": 2000, "scheduler": "linear_schedule_with_warmup", "silent": false, "skip_special_tokens": true, "tensorboard_dir": null, "thread_count": null, "train_batch_size": 16, "train_custom_parameters_only": false, "use_cached_eval_features": false, "use_early_stopping": false, "use_multiprocessing": true, "wandb_kwargs": {}, "wandb_project": null, 
"warmup_ratio": 0.06, "warmup_steps": 563, "weight_decay": 0.001, "model_class": "NERModel", "classification_report": false, "labels_list": ["O", "B-job", "I-job", "B-nationality", "B-person", "I-person", "B-location", "B-time", "I-time", "B-event", "I-event", "B-organization", "I-organization", "I-location", "I-nationality", "B-product", "I-product", "B-artwork", "I-artwork"], "lazy_loading": false, "lazy_loading_start_line": 0, "onnx": false, "special_tokens_list": []}
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f67e14bc3fe36510f7f3fa99726a5240dc63cbb60f051f5b6b97f4f82377b53
3
+ size 4471121457
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcb9993a3bf8dc52862ea7ee98642de4675be90a7efccce629abc35749268364
3
+ size 2235610244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8da0636dfe95dd35ec9c3de89e2da9babbe4b94bbb061482b1a4915cdb79c23
3
+ size 623
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
3
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "do_lower_case": false, "model_max_length": 512, "name_or_path": "xlm-roberta-large"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8929d8bac9dae4c7101ccfe58a662b7132989582536babf570858e8bf404898
3
+ size 3055