{
  "compression": [
    {
      "algorithm": "movement_sparsity",
      "ignored_scopes": [
        "{re}.*PatchEmbed.*",
        "{re}.*PatchMerging.*",
        "{re}.*classifier.*",
        "{re}.*LayerNorm.*"
      ],
      "params": {
        "enable_structured_masking": true,
        "importance_regularization_factor": 1.0,
        "warmup_end_epoch": 5,
        "warmup_start_epoch": 2
      },
      "sparse_structure_by_scopes": [
        {
          "mode": "block",
          "sparse_factors": [
            16,
            16
          ],
          "target_scopes": "{re}.*SwinAttention.*"
        },
        {
          "axis": 0,
          "mode": "per_dim",
          "target_scopes": "{re}.*SwinIntermediate.*"
        },
        {
          "axis": 1,
          "mode": "per_dim",
          "target_scopes": "{re}.*SwinOutput.*"
        }
      ]
    },
    {
      "algorithm": "quantization",
      "export_to_onnx_standard_ops": false,
      "ignored_scopes": [
        "{re}.*__add___[0-1]",
        "{re}.*layer_norm_0",
        "{re}.*matmul_1",
        "{re}.*__truediv__*"
      ],
      "initializer": {
        "batchnorm_adaptation": {
          "num_bn_adaptation_samples": 200
        },
        "range": {
          "num_init_samples": 32,
          "params": {
            "max_percentile": 99.99,
            "min_percentile": 0.01
          },
          "type": "percentile"
        }
      },
      "overflow_fix": "enable",
      "preset": "mixed",
      "scope_overrides": {
        "activations": {
          "{re}.*matmul_0": {
            "mode": "symmetric"
          }
        }
      }
    }
  ],
  "input_info": [
    {
      "keyword": "pixel_values",
      "sample_size": [
        16,
        3,
        224,
        224
      ],
      "type": "float"
    }
  ],
  "optimum_version": "1.7.1",
  "save_onnx_model": false,
  "transformers_version": "4.26.1"
}
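
For context, a minimal sketch of how a config like this is typically consumed during training with optimum-intel. It is not a verified recipe: the file name "compression_config.json", the Swin checkpoint, the output directory, and the omitted dataset arguments are placeholders rather than values taken from this config, and the import paths follow the optimum-intel 1.7.x documentation and may differ in other versions.

# Sketch: feed the "compression" section of this config to NNCF via OVTrainer.
import json

from transformers import AutoModelForImageClassification
from optimum.intel import OVConfig, OVTrainer, OVTrainingArguments

with open("compression_config.json") as f:  # assumed local copy of this file
    saved = json.load(f)

# Only the "compression" list drives NNCF (movement sparsity + quantization);
# "input_info" and the version fields are metadata recorded at export time.
ov_config = OVConfig(compression=saved["compression"])

model = AutoModelForImageClassification.from_pretrained(
    "microsoft/swin-tiny-patch4-window7-224"  # placeholder checkpoint
)

trainer = OVTrainer(
    model=model,
    ov_config=ov_config,
    task="image-classification",
    args=OVTrainingArguments(output_dir="swin-compressed"),
    # train_dataset=..., eval_dataset=..., data_collator=...  (task-specific)
)
# trainer.train()
# trainer.save_model("swin-compressed")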