#!/usr/bin/env bash
# Please go to
# https://huggingface.co/Zengwei/icefall-asr-librispeech-conv-emformer-transducer-stateless2-2022-07-05
# to download the pre-trained model and symlink it into $dir as epoch-30.pt,
# so that it can be loaded with --epoch 30 --avg 1 below:
#
#   cd $dir
#   ln -s icefall-asr-librispeech-conv-emformer-transducer-stateless2-2022-07-05/exp/pretrained-epoch-30-avg-10-averaged.pt epoch-30.pt
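#
# For reference, a possible download recipe (a sketch; requires git-lfs and
# assumes the repo is cloned inside $dir):
#
#   cd $dir
#   GIT_LFS_SKIP_SMUDGE=1 git clone \
#     https://huggingface.co/Zengwei/icefall-asr-librispeech-conv-emformer-transducer-stateless2-2022-07-05
#   cd icefall-asr-librispeech-conv-emformer-transducer-stateless2-2022-07-05
#   git lfs pull --include "exp/pretrained-epoch-30-avg-10-averaged.pt"
#   cd ..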
. path.sh
export CUDA_VISIBLE_DEVICES=""
dir=test-conv-emformer-librispeech
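# Export the encoder, decoder, and joiner as jit-traced models that pnnx can
# convert. The model arguments below must match the configuration the
# checkpoint was trained with.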
./conv_emformer_transducer_stateless2/export-for-ncnn.py \
--exp-dir $dir \
--epoch 30 \
--avg 1 \
--use-averaged-model 0 \
\
--num-encoder-layers 12 \
--chunk-length 32 \
--cnn-module-kernel 31 \
--left-context-length 32 \
--right-context-length 8 \
--memory-size 32
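# Optional sanity check: the export step above should have produced
# encoder_jit_trace-pnnx.pt, decoder_jit_trace-pnnx.pt, and
# joiner_jit_trace-pnnx.pt inside $dir, e.g.
#
#   ls -lh $dir/*_jit_trace-pnnx.pt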
mv $dir/encoder_jit_trace-pnnx.pt \
  $dir/encoder_jit_trace-epoch-30-avg-10-pnnx.pt
mv $dir/decoder_jit_trace-pnnx.pt \
  $dir/decoder_jit_trace-epoch-30-avg-10-pnnx.pt
mv $dir/joiner_jit_trace-pnnx.pt \
  $dir/joiner_jit_trace-epoch-30-avg-10-pnnx.pt
cd $dir
# If you are going to quantize the models with int8 later, run the following
# two commands instead (fp16=0 disables the default fp16 weight conversion,
# so the exported weights stay in float32 for the int8 quantization step):
#
#   pnnx encoder_jit_trace-epoch-30-avg-10-pnnx.pt fp16=0
#   pnnx joiner_jit_trace-epoch-30-avg-10-pnnx.pt fp16=0
#
# Otherwise, export with the defaults:
pnnx encoder_jit_trace-epoch-30-avg-10-pnnx.pt
pnnx decoder_jit_trace-epoch-30-avg-10-pnnx.pt
pnnx joiner_jit_trace-epoch-30-avg-10-pnnx.pt
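# pnnx writes <name>.ncnn.param and <name>.ncnn.bin next to each input file;
# these are the files loaded at runtime, e.g. by sherpa-ncnn.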
# Note: You have to edit the first few lines of
# encoder_jit_trace-epoch-30-avg-10-pnnx.ncnn.param by hand, changing
#
#   7767517
#   1060 1342
#   Input in0 0 1 in0
#
# to
#
#   7767517
#   1061 1342
#   SherpaMetaData sherpa_meta_data1 0 0 0=1 1=12 2=32 3=31 4=8 5=32 6=8 7=512
#   Input in0 0 1 in0
#
# That is, insert the SherpaMetaData line before the Input layer and increase
# the layer count on the second line by 1 (1060 -> 1061) to account for it.
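#
# A possible way to apply that edit automatically (a sketch using GNU sed;
# verify the result by hand, and note that the layer counts 1060/1061 are
# specific to this export):
#
#   sed -i \
#     -e '2s/^1060 /1061 /' \
#     -e '2a SherpaMetaData sherpa_meta_data1 0 0 0=1 1=12 2=32 3=31 4=8 5=32 6=8 7=512' \
#     encoder_jit_trace-epoch-30-avg-10-pnnx.ncnn.param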