Commit 98feec4
Parent(s): c21de83

Use torchautocast for roberta large (#1)

- Use torchautocast for roberta large (3b860b99d24c586218d6ea26c65b8b52456526a6)
- Remove mentions of habana mixed precision (8a8eccd7e98bae082c1b8fe65fae09d5bdf64999)

Co-authored-by: Shiv Kaul <skaulintel@users.noreply.huggingface.co>

Files changed:
- README.md +1 -5
- gaudi_config.json +3 -26
README.md
CHANGED

@@ -13,11 +13,7 @@ This model only contains the `GaudiConfig` file for running the [roberta-large](
 **This model contains no model weights, only a GaudiConfig.**
 
 This enables to specify:
-- `
-- `hmp_opt_level`: optimization level for HMP, see [here](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Mixed_Precision/PT_Mixed_Precision.html#configuration-options) for a detailed explanation
-- `hmp_bf16_ops`: list of operators that should run in bf16
-- `hmp_fp32_ops`: list of operators that should run in fp32
-- `hmp_is_verbose`: verbosity
+- `use_torch_autocast`: whether to use PyTorch's autocast mixed precision
 - `use_fused_adam`: whether to use Habana's custom AdamW implementation
 - `use_fused_clip_norm`: whether to use Habana's fused gradient norm clipping operator
 
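For orientation, here is a sketch of how a GaudiConfig-only repository like this one is typically consumed, based on the optimum-habana examples. The output directory and the omitted datasets are placeholders, and keyword arguments such as `use_habana`/`use_lazy_mode` may differ across optimum-habana versions:

```python
# Hedged sketch: loading this repository's GaudiConfig for training on HPU.
# Assumes optimum-habana and the Habana PyTorch bridge are installed;
# argument names follow optimum-habana's examples and may vary by version.
from optimum.habana import GaudiConfig, GaudiTrainer, GaudiTrainingArguments
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# This repository provides only the GaudiConfig, no weights ...
gaudi_config = GaudiConfig.from_pretrained("Habana/roberta-large")

# ... so the weights come from the upstream roberta-large checkpoint.
model = AutoModelForSequenceClassification.from_pretrained("roberta-large")
tokenizer = AutoTokenizer.from_pretrained("roberta-large")

args = GaudiTrainingArguments(
    output_dir="./out",   # placeholder
    use_habana=True,      # run on HPU
    use_lazy_mode=True,   # Habana lazy execution mode
    bf16=True,            # with use_torch_autocast, runs autocast in bf16
)

trainer = GaudiTrainer(
    model=model,
    gaudi_config=gaudi_config,
    args=args,
    tokenizer=tokenizer,
    # train_dataset and eval_dataset omitted in this sketch
)
```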
gaudi_config.json
CHANGED

@@ -1,28 +1,5 @@
 {
-  "
-  "hmp_is_verbose": false,
+  "use_torch_autocast": true,
   "use_fused_adam": true,
-  "use_fused_clip_norm": true,
-  "hmp_bf16_ops": [
-    "add",
-    "addmm",
-    "bmm",
-    "div",
-    "dropout",
-    "gelu",
-    "iadd",
-    "linear",
-    "layer_norm",
-    "matmul",
-    "mm",
-    "rsub",
-    "softmax",
-    "truediv"
-  ],
-  "hmp_fp32_ops": [
-    "cross_entropy",
-    "embedding",
-    "nll_loss",
-    "log_softmax"
-  ]
-}
+  "use_fused_clip_norm": true
+}
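The substance of the change: the hand-maintained HMP op lists (`hmp_bf16_ops`, `hmp_fp32_ops`) are replaced by a single `use_torch_autocast` flag, delegating per-op precision choices to PyTorch's autocast. As a rough illustration of what that flag corresponds to in plain PyTorch (not optimum-habana's internal code; the `hpu` device type assumes Habana's PyTorch bridge is installed):

```python
import torch
import habana_frameworks.torch.core  # registers the "hpu" device (Habana bridge)
from transformers import AutoModelForMaskedLM, AutoTokenizer

model = AutoModelForMaskedLM.from_pretrained("roberta-large").to("hpu")
tokenizer = AutoTokenizer.from_pretrained("roberta-large")
inputs = tokenizer("Paris is the <mask> of France.", return_tensors="pt").to("hpu")

# Instead of enumerating bf16/fp32 ops by hand (the old hmp_bf16_ops /
# hmp_fp32_ops lists), torch.autocast applies its own per-op cast policy.
with torch.autocast(device_type="hpu", dtype=torch.bfloat16):
    outputs = model(**inputs)
```

Autocast's built-in policy already keeps numerically sensitive ops (e.g. losses and normalizations) in fp32, which is why the explicit fp32 list can be dropped from the config.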