wissamantoun
committed on
Commit
•
ca727f5
1
Parent(s):
bcfac93
Update README.md
Browse files
README.md
CHANGED
@@ -13,6 +13,8 @@ widget:
|
|
13 |
|
14 |
# Arabic GPT2
|
15 |
|
|
|
|
|
16 |
You can find more information in our paper [AraGPT2](https://arxiv.org/abs/2012.15520)
|
17 |
|
18 |
The code in this repository was used to train all GPT2 variants. The code supports training and fine-tuning GPT2 on GPUs and TPUs via the TPUEstimator API.
|
@@ -38,7 +40,7 @@ from arabert.aragpt2.grover.modeling_gpt2 import GPT2LMHeadModel
|
|
38 |
|
39 |
from arabert.preprocess import ArabertPreprocessor
|
40 |
|
41 |
-
MODEL_NAME='aragpt2-medium'
|
42 |
arabert_prep = ArabertPreprocessor(model_name=MODEL_NAME)
|
43 |
|
44 |
text=""
|
@@ -73,25 +75,7 @@ python create_pretraining_data.py
|
|
73 |
|
74 |
Finetuning:
|
75 |
```bash
|
76 |
-
python3 run_pretraining.py
|
77 |
-
--input_file="gs://<GS_BUCKET>/pretraining_data/*" \\
|
78 |
-
--output_dir="gs://<GS_BUCKET>/pretraining_model/" \\
|
79 |
-
--config_file="config/small_hparams.json" \\
|
80 |
-
--batch_size=128 \\
|
81 |
-
--eval_batch_size=8 \\
|
82 |
-
--num_train_steps= \\
|
83 |
-
--num_warmup_steps= \\
|
84 |
-
--learning_rate= \\
|
85 |
-
--save_checkpoints_steps= \\
|
86 |
-
--max_seq_length=1024 \\
|
87 |
-
--max_eval_steps= \\
|
88 |
-
--optimizer="lamb" \\
|
89 |
-
--iterations_per_loop=5000 \\
|
90 |
-
--keep_checkpoint_max=10 \\
|
91 |
-
--use_tpu=True \\
|
92 |
-
--tpu_name=<TPU NAME> \\
|
93 |
-
--do_train=True \\
|
94 |
-
--do_eval=False
|
95 |
```
|
96 |
# Model Sizes
|
97 |
|
|
|
13 |
|
14 |
# Arabic GPT2
|
15 |
|
16 |
+
<img src="https://raw.githubusercontent.com/aub-mind/arabert/master/AraGPT2.png" width="100" align="left"/>
|
17 |
+
|
18 |
You can find more information in our paper [AraGPT2](https://arxiv.org/abs/2012.15520)
|
19 |
|
20 |
The code in this repository was used to train all GPT2 variants. The code supports training and fine-tuning GPT2 on GPUs and TPUs via the TPUEstimator API.
|
|
|
40 |
|
41 |
from arabert.preprocess import ArabertPreprocessor
|
42 |
|
43 |
+
MODEL_NAME='aubmindlab/aragpt2-medium'
|
44 |
arabert_prep = ArabertPreprocessor(model_name=MODEL_NAME)
|
45 |
|
46 |
text=""
|
|
|
75 |
|
76 |
Finetuning:
|
77 |
```bash
|
78 |
+
python3 run_pretraining.py \
 --input_file="gs://<GS_BUCKET>/pretraining_data/*" \
 --output_dir="gs://<GS_BUCKET>/pretraining_model/" \
 --config_file="config/small_hparams.json" \
 --batch_size=128 \
 --eval_batch_size=8 \
 --num_train_steps= \
 --num_warmup_steps= \
 --learning_rate= \
 --save_checkpoints_steps= \
 --max_seq_length=1024 \
 --max_eval_steps= \
 --optimizer="lamb" \
 --iterations_per_loop=5000 \
 --keep_checkpoint_max=10 \
 --use_tpu=True \
 --tpu_name=<TPU NAME> \
 --do_train=True \
 --do_eval=False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
79 |
```
|
80 |
# Model Sizes
|
81 |
|