Update tiny models for Wav2Vec2ForSequenceClassification
#90
by hf-transformers-bot - opened
- config.json +3 -1
- pytorch_model.bin +1 -1
- tf_model.h5 +3 -0
- tokenizer_config.json +2 -2
config.json
CHANGED
@@ -1,5 +1,7 @@
 {
+  "_name_or_path": "tiny_models/wav2vec2/Wav2Vec2ForSequenceClassification",
   "activation_dropout": 0.1,
+  "adapter_attn_dim": null,
   "adapter_kernel_size": 3,
   "adapter_stride": 2,
   "add_adapter": false,
@@ -79,7 +81,7 @@
     3
   ],
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.32.0.dev0",
   "use_weighted_layer_sum": false,
   "vocab_size": 32,
   "xvector_output_dim": 32
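For reference, a minimal sketch of how the updated config could be inspected with transformers. The path below is simply the one recorded in `_name_or_path`; substitute the actual repo id or local checkout:

```python
from transformers import Wav2Vec2Config

# Path taken from the "_name_or_path" field above (adjust to your checkout or repo id).
path = "tiny_models/wav2vec2/Wav2Vec2ForSequenceClassification"

config = Wav2Vec2Config.from_pretrained(path)

# Fields touched by this update.
print(config.adapter_attn_dim)      # None (newly added key)
print(config.transformers_version)  # "4.32.0.dev0"
print(config.vocab_size)            # 32 (tiny model)
```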
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:c3ec9bd563cd973fc1a773472e0e82d78e5f23add62ff1bf5f34016c5712e10b
 size 153130
tf_model.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:472be9908e6298d85c07cae4d73dc4abd42617f31bd92f7501841e3c2206f04c
+size 240888
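With tf_model.h5 now added alongside pytorch_model.bin, a quick sanity-check sketch that both checkpoints load, assuming the same hypothetical local path as above and that both PyTorch and TensorFlow are installed:

```python
from transformers import (
    Wav2Vec2ForSequenceClassification,
    TFWav2Vec2ForSequenceClassification,
)

# Hypothetical local path for the tiny model, as in the config example above.
path = "tiny_models/wav2vec2/Wav2Vec2ForSequenceClassification"

# Loads pytorch_model.bin.
pt_model = Wav2Vec2ForSequenceClassification.from_pretrained(path)

# Loads the newly added tf_model.h5.
tf_model = TFWav2Vec2ForSequenceClassification.from_pretrained(path)

print(pt_model.num_parameters(), pt_model.config.num_labels)
```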
tokenizer_config.json
CHANGED
@@ -1,14 +1,14 @@
 {
   "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
   "do_lower_case": false,
   "do_normalize": true,
   "eos_token": "</s>",
   "model_max_length": 9223372036854775807,
-  "name_or_path": "facebook/wav2vec2-base-960h",
   "pad_token": "<pad>",
   "replace_word_delimiter_char": " ",
   "return_attention_mask": false,
-  "
+  "target_lang": null,
   "tokenizer_class": "Wav2Vec2CTCTokenizer",
   "unk_token": "<unk>",
   "word_delimiter_token": "|"
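A minimal sketch of how the tokenizer update could be checked, again using the hypothetical local path from the examples above; the `tokenizer_class` field names Wav2Vec2CTCTokenizer:

```python
from transformers import Wav2Vec2CTCTokenizer

# Hypothetical local path, as in the config example above.
path = "tiny_models/wav2vec2/Wav2Vec2ForSequenceClassification"

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(path)

# Keys touched by this update.
print(tokenizer.clean_up_tokenization_spaces)  # True (newly added key)
print(tokenizer.target_lang)                   # None (newly added key)
print(tokenizer.word_delimiter_token)          # "|"
```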