speech-test committed on
Commit 0a1a74d
1 Parent(s): ee66bb6
Files changed (2)
  1. config.json +8 -1
  2. pytorch_model.bin +1 -1
config.json CHANGED
@@ -1,6 +1,9 @@
 {
   "_name_or_path": "facebook/wav2vec2-base-960h",
   "activation_dropout": 0.0,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
   "apply_spec_augment": true,
   "architectures": [
     "Wav2Vec2ForXVector"
@@ -2488,8 +2491,10 @@
   "mask_channel_prob": 0.0,
   "mask_channel_selection": "static",
   "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
   "mask_feature_prob": 0.0,
   "mask_time_length": 10,
+  "mask_time_min_masks": 2,
   "mask_time_min_space": 1,
   "mask_time_other": 0.0,
   "mask_time_prob": 0.05,
@@ -2497,6 +2502,7 @@
   "model_type": "wav2vec2",
   "no_mask_channel_overlap": false,
   "no_mask_time_overlap": false,
+  "num_adapter_layers": 3,
   "num_attention_heads": 12,
   "num_codevector_groups": 2,
   "num_codevectors_per_group": 320,
@@ -2505,6 +2511,7 @@
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 12,
   "num_negatives": 100,
+  "output_hidden_size": 768,
   "pad_token_id": 0,
   "proj_codevector_dim": 256,
   "tdnn_dilation": [
@@ -2529,7 +2536,7 @@
     1
   ],
   "torch_dtype": "float32",
-  "transformers_version": "4.13.0.dev0",
+  "transformers_version": "4.14.0.dev0",
   "use_weighted_layer_sum": true,
   "vocab_size": 32,
   "xvector_output_dim": 512
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:690c012f7cc09e6647012445fac9b046627d08a7aef5551172be9a5f89a145e7
+oid sha256:dca2df146a2340fa99af66baee448b42560e73f2e4f10dc507eb5982987e0f7a
 size 404494903
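pytorch_model.bin keeps the same size (404494903 bytes) but gets a new LFS sha256, so the serialized weights changed even though the byte count did not. A quick integrity check against the new pointer, assuming huggingface_hub is available; the repo id is again a placeholder:

# Sketch: verify a downloaded pytorch_model.bin against the sha256 in the new
# LFS pointer. "<namespace>/<repo>" is a placeholder repo id.
import hashlib
from huggingface_hub import hf_hub_download

EXPECTED = "dca2df146a2340fa99af66baee448b42560e73f2e4f10dc507eb5982987e0f7a"

path = hf_hub_download("<namespace>/<repo>", "pytorch_model.bin", revision="0a1a74d")

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == EXPECTED, "checksum does not match the LFS pointer"
print("pytorch_model.bin matches:", digest.hexdigest())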