{
  "builder_config": {
    "fp8": false,
    "hidden_size": 768,
    "huggingface": {
      "_name_or_path": "openai/whisper-small",
      "activation_dropout": 0.0,
      "activation_function": "gelu",
      "architectures": [
        "WhisperForConditionalGeneration"
      ],
      "attention_dropout": 0.0,
      "begin_suppress_tokens": [
        220,
        50257
      ],
      "bos_token_id": 50257,
      "d_model": 768,
      "decoder_attention_heads": 12,
      "decoder_ffn_dim": 3072,
      "decoder_layerdrop": 0.0,
      "decoder_layers": 12,
      "decoder_start_token_id": 50258,
      "dropout": 0.0,
      "encoder_attention_heads": 12,
      "encoder_ffn_dim": 3072,
      "encoder_layerdrop": 0.0,
      "encoder_layers": 12,
      "eos_token_id": 50257,
      "forced_decoder_ids": [
        [
          1,
          50259
        ],
        [
          2,
          50359
        ],
        [
          3,
          50363
        ]
      ],
      "hidden_size": 768,
      "init_std": 0.02,
      "is_encoder_decoder": true,
      "max_length": 448,
      "max_sequence_length": 448,
      "max_source_positions": 1500,
      "max_target_positions": 448,
      "model_type": "whisper",
      "num_hidden_layers": 12,
      "num_layers": 12,
      "num_mel_bins": 80,
      "pad_token_id": 50257,
      "scale_embedding": false,
      "suppress_tokens": [
        1,
        2,
        7,
        8,
        9,
        10,
        14,
        25,
        26,
        27,
        28,
        29,
        31,
        58,
        59,
        60,
        61,
        62,
        63,
        90,
        91,
        92,
        93,
        359,
        503,
        522,
        542,
        873,
        893,
        902,
        918,
        922,
        931,
        1350,
        1853,
        1982,
        2460,
        2627,
        3246,
        3253,
        3268,
        3536,
        3846,
        3961,
        4183,
        4667,
        6585,
        6647,
        7273,
        9061,
        9383,
        10428,
        10929,
        11938,
        12033,
        12331,
        12562,
        13793,
        14157,
        14635,
        15265,
        15618,
        16553,
        16604,
        18362,
        18956,
        20075,
        21675,
        22520,
        26130,
        26161,
        26435,
        28279,
        29464,
        31650,
        32302,
        32470,
        36865,
        42863,
        47425,
        49870,
        50254,
        50258,
        50360,
        50361,
        50362
      ],
      "torch_dtype": "float32",
      "transformers_version": "4.27.0.dev0",
      "use_cache": true,
      "vocab_size": 51865
    },
    "int8": false,
    "max_batch_size": 1,
    "n_mels": 80,
    "name": "whisper",
    "num_heads": 12,
    "num_languages": 99,
    "num_layers": 12,
    "precision": "float16",
    "quant_mode": 0,
    "tensor_parallel": 1,
    "tensorrt": "9.2.0.post12.dev5",
    "use_refit": false
  },
  "plugin_config": {
    "attention_qk_half_accumulation": false,
    "bert_attention_plugin": false,
    "context_fmha_type": 1,
    "gemm_plugin": "float16",
    "gpt_attention_plugin": "float16",
    "identity_plugin": false,
    "layernorm_plugin": false,
    "layernorm_quantization_plugin": false,
    "lookup_plugin": false,
    "lora_plugin": false,
    "multi_block_mode": false,
    "nccl_plugin": false,
    "paged_kv_cache": false,
    "quantize_per_token_plugin": false,
    "quantize_tensor_plugin": false,
    "remove_input_padding": true,
    "rmsnorm_plugin": false,
    "rmsnorm_quantization_plugin": false,
    "smooth_quant_gemm_plugin": false,
    "tokens_per_block": 0,
    "use_custom_all_reduce": false,
    "use_paged_context_fmha": false,
    "weight_only_groupwise_quant_matmul_plugin": false,
    "weight_only_quant_matmul_plugin": false
  }
}