internal.wav2vec2-base-superb-ks-int8-structured64-quantize-feature-extractor / openvino_config.json
{
  "compression": [
    {
      "algorithm": "movement_sparsity",
      "ignored_scopes": [
        "{re}projector",
        "{re}classifier",
        "{re}feature_extractor",
        "{re}feature_projection",
        "{re}pos_conv_embed"
      ],
      "params": {
        "enable_structured_masking": true,
        "importance_regularization_factor": 0.04,
        "warmup_end_epoch": 6,
        "warmup_start_epoch": 1
      },
      "sparse_structure_by_scopes": [
        {
          "mode": "block",
          "sparse_factors": [
            32,
            32
          ],
          "target_scopes": "{re}.*Wav2Vec2Attention.*"
        },
        {
          "axis": 0,
          "mode": "per_dim",
          "target_scopes": "{re}.*intermediate_dense.*"
        },
        {
          "axis": 1,
          "mode": "per_dim",
          "target_scopes": "{re}.*output_dense.*"
        }
      ]
    },
    {
      "algorithm": "quantization",
      "export_to_onnx_standard_ops": false,
      "ignored_scopes": [
        "{re}.*__add___[0-1]",
        "{re}.*layer_norm_0"
      ],
      "initializer": {
        "batchnorm_adaptation": {
          "num_bn_adaptation_samples": 256
        },
        "range": {
          "num_init_samples": 512,
          "params": {
            "max_percentile": 99.99,
            "min_percentile": 0.01
          },
          "type": "percentile"
        }
      },
      "overflow_fix": "disable",
      "preset": "mixed",
      "quantize_inputs": false,
      "scope_overrides": {
        "activations": {
          "{re}.*matmul_0": {
            "mode": "symmetric"
          }
        }
      }
    }
  ],
  "input_info": [
    {
      "keyword": "input_values",
      "sample_size": [
        32,
        16000
      ],
      "type": "float"
    }
  ],
  "log_dir": "/nvme2/yujiepan/workspace/jpqd-test/playground/optimum-playground/0314.example-rerun/logs/w2v2-ks-jpqd-quant-FE-finetuned-student",
  "optimum_version": "1.6.3",
  "save_onnx_model": false,
  "transformers_version": "4.26.0"
} | |
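
For context (this is not part of the config file): a configuration like the one above is typically consumed by Optimum Intel's OVTrainer during joint pruning, quantization and distillation (JPQD) training of a wav2vec2 keyword-spotting model. The following is a minimal, hypothetical sketch assuming the optimum-intel ~1.6.x API (OVConfig, OVTrainer, OVTrainingArguments); the model identifiers, hyperparameter values, and dataset preprocessing are illustrative assumptions, not the recipe actually used to produce this repository.

    # Hypothetical usage sketch: wiring an openvino_config.json like the one above
    # into an Optimum Intel JPQD training run (assumes optimum-intel ~1.6.x).
    # Dataset loading/preprocessing for "superb/ks" is elided for brevity.
    import json

    from transformers import AutoFeatureExtractor, AutoModelForAudioClassification
    from optimum.intel.openvino import OVConfig, OVTrainer, OVTrainingArguments

    # Rebuild the NNCF compression config (movement sparsity + quantization)
    # from the JSON file shown above.
    with open("openvino_config.json") as f:
        raw = json.load(f)
    ov_config = OVConfig(compression=raw["compression"])

    student_id = "facebook/wav2vec2-base"             # hypothetical student checkpoint
    teacher_id = "superb/wav2vec2-base-superb-ks"     # hypothetical fine-tuned teacher
    model = AutoModelForAudioClassification.from_pretrained(student_id, num_labels=12)
    teacher = AutoModelForAudioClassification.from_pretrained(teacher_id)
    feature_extractor = AutoFeatureExtractor.from_pretrained(student_id)

    training_args = OVTrainingArguments(
        output_dir="w2v2-ks-jpqd",
        per_device_train_batch_size=32,   # matches sample_size[0] in input_info
        num_train_epochs=12,              # should extend past warmup_end_epoch=6
        distillation_weight=0.9,          # illustrative distillation loss weight
    )

    trainer = OVTrainer(
        model=model,
        teacher_model=teacher,
        args=training_args,
        ov_config=ov_config,
        task="audio-classification",
        train_dataset=None,   # supply a preprocessed "superb/ks" train split here
        eval_dataset=None,    # and a validation split
        tokenizer=feature_extractor,
    )
    # trainer.train() followed by trainer.save_model() would export the pruned,
    # quantized model for OpenVINO inference.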