# Put the word-segmentation toolchain (OpenFST, fst/lm binaries, SRILM)
# on PATH. Each iteration prepends, so the LAST entry in the list ends up
# FIRST in PATH — the order below reproduces the original precedence.
word_seg_root=/home/work_nfs15/asr_data/ckpt/xlgeng/word_seg
for tool_dir in \
    "${word_seg_root}/openfst-1.6.7/bin" \
    "${word_seg_root}/fstbin" \
    "${word_seg_root}/lmbin" \
    "${word_seg_root}/srilm/path/bin" \
    "${word_seg_root}/srilm/bin" \
    "${word_seg_root}/srilm/lm/bin/i686-m64"; do
  export PATH="${tool_dir}:${PATH}"
done


# --- Inputs / outputs ------------------------------------------------------
# Token (unit) table for the char/BPE lexicon.
unit_path=/home/work_nfs15/asr_data/ckpt/asr_online_system/lang_char_bpe/tokens.txt
# Word list: plain words only, no function words; only the first
# whitespace-separated field of each line is used.
world_list_path=/home/work_nfs15/asr_data/ckpt/xlgeng/word_seg/word_95w.list
bpe_model_path=/home/work_nfs15/asr_data/ckpt/asr_online_system/lang_char_bpe/bpe.model
input_text_path=/home/work_nfs11/code/xlgeng/gxl_ai_utils/eggs/cats_and_dogs/ngram_task/data_handler/gxl_data_zoo/poi_text/xian/final_data.list
output_dir='./gxl_arpa_zoo/poi_text/xian'


output_lexison_path=${output_dir}/local/dict/lexicon.txt
# BUGFIX: create the dict directory BEFORE copying into it — the original
# script ran the cp first, which fails whenever output_dir is fresh.
mkdir -p "${output_dir}/local/dict"
cp "$unit_path" "${output_dir}/local/dict/units.txt"
# Build lexicon.txt from the word list using the BPE model and unit table.
python tools/fst/prepare_dict.py "$unit_path" "$world_list_path" "$output_lexison_path" "$bpe_model_path"

# Working directory for the language model.
lm=${output_dir}/local/lm
mkdir -p "$lm"
# input_text_path is a text file that has already been word-segmented.
#input_text_path=/home/work_nfs8/xlgeng/new_workspace/gxl_ai_utils/eggs/cats_and_dogs/prepare_data_for_en_cn/make_arpa/data_res/finall.txt
cp "$input_text_path" "$lm/text"

# Training an n-gram LM only needs the text corpus and the lexicon;
# the trailing 3 is the n-gram order.
bash local/aishell_train_lms.sh "${output_dir}/local/lm/text" "${output_dir}/local/dict/lexicon.txt" "${output_dir}/local/lm" 3

