Use torch autocast for roberta-large

#1
by skaulintel - opened
Files changed (2)
  1. README.md +1 -5
  2. gaudi_config.json +3 -26
README.md CHANGED

```diff
@@ -13,11 +13,7 @@ This model only contains the `GaudiConfig` file for running the [roberta-large](
 **This model contains no model weights, only a GaudiConfig.**
 
 This enables to specify:
-- `use_habana_mixed_precision`: whether to use Habana Mixed Precision (HMP)
-- `hmp_opt_level`: optimization level for HMP, see [here](https://docs.habana.ai/en/latest/PyTorch/PyTorch_Mixed_Precision/PT_Mixed_Precision.html#configuration-options) for a detailed explanation
-- `hmp_bf16_ops`: list of operators that should run in bf16
-- `hmp_fp32_ops`: list of operators that should run in fp32
-- `hmp_is_verbose`: verbosity
+- `use_torch_autocast`: whether to use PyTorch's autocast mixed precision
 - `use_fused_adam`: whether to use Habana's custom AdamW implementation
 - `use_fused_clip_norm`: whether to use Habana's fused gradient norm clipping operator
 
```
gaudi_config.json CHANGED

```diff
@@ -1,28 +1,5 @@
 {
-  "use_habana_mixed_precision": true,
-  "hmp_is_verbose": false,
+  "use_torch_autocast": true,
   "use_fused_adam": true,
-  "use_fused_clip_norm": true,
-  "hmp_bf16_ops": [
-    "add",
-    "addmm",
-    "bmm",
-    "div",
-    "dropout",
-    "gelu",
-    "iadd",
-    "linear",
-    "layer_norm",
-    "matmul",
-    "mm",
-    "rsub",
-    "softmax",
-    "truediv"
-  ],
-  "hmp_fp32_ops": [
-    "cross_entropy",
-    "embedding",
-    "nll_loss",
-    "log_softmax"
-  ]
-}
+  "use_fused_clip_norm": true
+}
```