ahmedheakl
committed on
ahmedheakl/asm2asm-qwen2.5coder-0.5b-100k-2ep-tokenizer
- README.md +6 -6
- generation_config.json +1 -1
README.md
CHANGED
@@ -36,12 +36,12 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size:
+- train_batch_size: 1
 - eval_batch_size: 8
 - seed: 42
 - gradient_accumulation_steps: 8
-- total_train_batch_size:
-- optimizer:
+- total_train_batch_size: 8
+- optimizer: Use paged_adamw_32bit with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - num_epochs: 2
 
@@ -51,7 +51,7 @@ The following hyperparameters were used during training:
 
 ### Framework versions
 
-- Transformers 4.
+- Transformers 4.46.0
 - Pytorch 2.4.1+cu118
-- Datasets 3.0.
-- Tokenizers 0.
+- Datasets 3.0.1
+- Tokenizers 0.20.1
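The hyperparameters filled in above map directly onto `transformers.TrainingArguments`. As a rough illustration only (this is not the training script behind this commit, and the output path is a placeholder), they could be expressed like this:

```python
# Illustrative sketch: TrainingArguments mirroring the values in the README diff above.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="asm2asm-qwen2.5coder-0.5b",  # placeholder path (assumption)
    learning_rate=2e-4,                      # learning_rate: 0.0002
    per_device_train_batch_size=1,           # train_batch_size: 1
    per_device_eval_batch_size=8,            # eval_batch_size: 8
    gradient_accumulation_steps=8,           # gradient_accumulation_steps: 8
    num_train_epochs=2,                      # num_epochs: 2
    lr_scheduler_type="linear",              # lr_scheduler_type: linear
    optim="paged_adamw_32bit",               # optimizer: paged_adamw_32bit (requires bitsandbytes)
    adam_beta1=0.9,                          # betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,                       # epsilon=1e-08
    seed=42,                                 # seed: 42
)
```

With a per-device batch size of 1 and 8 gradient-accumulation steps on a single device, each optimizer step covers 1 × 8 = 8 examples, which matches the total_train_batch_size of 8 in the diff.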
generation_config.json
CHANGED
@@ -10,5 +10,5 @@
   "temperature": 0.7,
   "top_k": 20,
   "top_p": 0.8,
-  "transformers_version": "4.
+  "transformers_version": "4.46.0"
 }
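The updated generation_config.json keeps the sampling settings (temperature 0.7, top_k 20, top_p 0.8) and only pins the transformers version. A minimal sketch of sampling with those values, assuming the repo id from this commit and an illustrative prompt (the max_new_tokens value is an assumption, not part of the shown config):

```python
# Minimal sketch: sample from the model with the settings stored in generation_config.json.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ahmedheakl/asm2asm-qwen2.5coder-0.5b-100k-2ep-tokenizer"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("mov eax, 1", return_tensors="pt")  # placeholder assembly input
outputs = model.generate(
    **inputs,
    max_new_tokens=128,   # assumption; not set in the shown config
    do_sample=True,
    temperature=0.7,      # from generation_config.json
    top_k=20,
    top_p=0.8,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Because these sampling values are stored in the repo's generation_config.json, model.generate() would pick them up by default; they are passed explicitly here only to make the sketch self-explanatory.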