advance: null
approach: post_training_static_quant
calib_iteration: 7
framework: pytorch
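# Post-training static quantization tuning configuration for a 12-layer BERT
# classifier in PyTorch; calibration runs for 7 iterations. Each key in the
# `op` map below is a (module_name, module_type) tuple (hence the
# !!python/tuple tags), and its value holds the observer settings chosen for
# that module.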
op:
  ? !!python/tuple
  - quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
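  # QuantStub entries are the float-to-quantized entry points; their
  # activations are observed per tensor as asymmetric uint8 with a min-max
  # observer.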
  ? !!python/tuple
  - bert.encoder.layer.0.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.0.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
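  # Linear entries pair symmetric per-channel int8 weights (min-max observer,
  # 7.0 bits) with symmetric per-tensor uint8 activations calibrated by the
  # KL-divergence algorithm; every Linear below uses these same settings.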
  ? !!python/tuple
  - bert.encoder.layer.0.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.0.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.0.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.0.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.0.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.0.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.0.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.0.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.1.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.1.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.1.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.1.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.1.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.1.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.1.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.1.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.1.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.1.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.2.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.2.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.2.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.2.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.2.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.2.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.2.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.2.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.2.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.2.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.3.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.3.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.3.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.3.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.3.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.3.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.3.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.3.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.3.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.3.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.4.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.4.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.4.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.4.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.4.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.4.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.4.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.4.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.4.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.4.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.5.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.5.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.5.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.5.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.5.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.5.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.5.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.5.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.5.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.5.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.6.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.6.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.6.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.6.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.6.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.6.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.6.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.6.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.6.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.6.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.7.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.7.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.7.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.7.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.7.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.7.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.7.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.7.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.7.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.7.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.8.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.8.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.8.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.8.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.8.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.8.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.8.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.8.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.8.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.8.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.9.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.9.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.9.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.9.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.9.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.9.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.9.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.9.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.9.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.9.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.10.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.10.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.10.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.10.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.10.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.10.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.10.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.10.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.10.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.10.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.11.attention.self.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.11.attention.self.query
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.11.attention.self.key
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.11.attention.self.value
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.11.attention.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.11.attention.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.11.intermediate.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.11.intermediate.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.encoder.layer.11.output.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.encoder.layer.11.output.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - bert.pooler.quant
  - QuantStub
  : activation:
      dtype: uint8
      scheme: asym
      granularity: per_tensor
      algorithm: minmax
  ? !!python/tuple
  - bert.pooler.dense
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
  ? !!python/tuple
  - classifier
  - Linear
  : weight:
      dtype: int8
      scheme: sym
      granularity: per_channel
      algorithm: minmax
      bit: 7.0
    activation:
      dtype: uint8
      scheme: sym
      granularity: per_tensor
      algorithm: kl
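# A minimal sketch of reading this file back with PyYAML (an assumption; any
# YAML 1.1 loader that understands the python tags would do). The
# !!python/tuple keys require the unsafe loader, so parse trusted files only:
#
#   import yaml
#
#   with open("tune_cfg.yaml") as f:      # hypothetical filename
#       cfg = yaml.unsafe_load(f)         # resolves !!python/tuple keys
#
#   for (module_name, module_type), settings in cfg["op"].items():
#       print(module_name, module_type, settings["activation"]["dtype"])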