# Test multiple parameters: iterate over every positional argument given to
# the script (each one is an experiment id / hyperparameter value) and run
# the selected generation command for it. All commands below are kept
# commented out as a record of past experiment configurations; uncomment
# exactly one to use it.
# NOTE(review): ${*:1} is unquoted, so arguments containing spaces or glob
# characters will be split/expanded; the safe idiom is `for para in "$@"`.
# Left unchanged here because the loop's closing `done` is outside this view.
for para in ${*:1}
do
    # Echo the current parameter so runs are identifiable in the log.
    # NOTE(review): unquoted ${para} — prefer `printf '%s\n' "${para}"`.
    echo ${para}
# conv-s2s
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --task translation_bpe data-bin/writingPrompts-prompt2story_bpe500_fix \
#      --arch fconv_self_att_wp --path ../fire_data/running/conv-s2s-d0.3_bpe500/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/conv-s2s-d0.3_bpe500 --quiet \
#      --skip-invalid-size-inputs-valid-test --min-len 150 --max-len-b 250 --exp-id ${para}
# conv-s2s fusion
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --task translation_bpe data-bin/writingPrompts-prompt2story_bpe500_fix \
#      --arch fconv_self_att_wp_fusion --path ../fire_data/running/conv-s2s-d0.3-fusion_bpe500/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/conv-s2s-d0.3-fusion_bpe500 --quiet \
#      --skip-invalid-size-inputs-valid-test --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --model-overrides "{'pretrained_checkpoint':'../fire_data/running/conv-s2s-d0.3_bpe500/checkpoint_best.pt'}"

# event to story model
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch bilstm_h_transformer_lm_gpt_format --path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 40 --nbest 1 \
#      --results-path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500 \
#      --arch bilstm_h_transformer_lm_gpt_format --path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 40 --nbest 1 \
#      --results-path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    for exp in `seq 1 5`
#    do
#        echo 'exp' + ${exp}
#        CUDA_VISIBLE_DEVICES=4 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500 \
#          --arch bilstm_h_transformer_lm_gpt_format --path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl/checkpoint_best.pt  \
#          --batch-size 32 --beam 1 --sampling --sampling-topk ${para} --nbest 1 \
#          --results-path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl --quiet \
#          --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${exp}
#    done
#    for exp in `seq 1 5`
#    do
#        echo 'exp' + ${exp}
#        CUDA_VISIBLE_DEVICES=4 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500 \
#          --arch bilstm_h_transformer_lm_gpt_format --path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl/checkpoint_best.pt  \
#          --batch-size 32 --beam 1 --sampling --sampling-topk 40 --temperature ${para} --nbest 1 \
#          --results-path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl --quiet \
#          --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${exp}
#    done

# own event data to story model
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500 \
#      --arch bilstm_h_transformer_lm_gpt_format --path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 40 --nbest 1 \
#      --results-path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --generated-event True --generated-event-path /home/rickwwang/project_research/out/prompt2srl2_500/49hyp.txt.1.topk40.t1.0
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500_fix \
#      --arch bilstmhier_h_transformer_lm_gpt_format --path ../out/bilstm_transformer-lm_bpe500_format_pre_srl_hier_pos_fix/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../out/bilstm_transformer-lm_bpe500_format_pre_srl_hier_pos_fix --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --generated-event True --generated-event-path /home/rickwwang/project_research/out/prompt2srl2_500_wo_smooth_d1/40hyp.txt.1.topk20.t1.0.nr6
#    CUDA_VISIBLE_DEVICES=1 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500_fix \
#      --arch bilstmhier_h_transformer_lm_gpt_format --path ../out/bilstm_transformer-lm_bpe500_format_pre_srl_hier_pos_fix/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../out/bilstm_transformer-lm_bpe500_format_pre_srl_hier_pos_fix --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --generated-event True --generated-event-path /home/rickwwang/project_research/fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss/29hyp.txt.1.topk20.t1.0.nr6
#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500_fix \
#      --arch bilstmhier_h_transformer_lm_gpt_format --path ../out/bilstm_transformer-lm_bpe500_format_pre_srl_hier_pos_fix/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../out/bilstm_transformer-lm_bpe500_format_pre_srl_hier_pos_fix --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --generated-event True --generated-event-path /home/rickwwang/project_research/fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss4/21hyp.txt.1.topk20.t1.0.nr6
#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500_fix \
#      --arch bilstmhier4_h_transformer_lm_gpt_format --path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl_hier4_fix/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl_hier4_fix --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --generated-event True --generated-event-path /home/rickwwang/project_research/out/prompt2srl2_500_wo_smooth_d1/40hyp.txt.1.topk20.t1.0.nr6
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --task hierstory_bpe data-bin/writingPrompts-srl2story_bpe500_fix \
#      --arch bilstmhier4_h_transformer_lm_gpt_format --path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl_hier4_fix/checkpoint_best.pt  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/bilstm_transformer-lm_bpe500_format_pre_srl_hier4_fix --quiet \
#      --skip-invalid-size-inputs-valid-test --use-context True --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --generated-event True --generated-event-path /home/rickwwang/project_research/fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss/29hyp.txt.1.topk20.t1.0.nr6



# story language model
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_gpt_bpe_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format2/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format2 --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_gpt_bpe_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2 --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format --path ../out/transformer-lm_bpe500_warmlr_format_pre2_absstory/checkpoint_best.pt \
#      --data-sufix .new-None.new --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../out/transformer-lm_bpe500_warmlr_format_pre2_absstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}

#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_gpt_bpe_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format2/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format2 --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 300 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=1 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_gpt_bpe_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2 --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 300 --exp-id ${para}


#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_bpe_memory_gpt_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_memory_gpt/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_memory_gpt --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_bpe_memory_gate_gpt_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_memory_gate_gpt/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_memory_gate_gpt --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_bpe_coherence_gpt_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coherence_gpt/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coherence_gpt --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_bpe_memory_gate_coherence_gpt_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_memory_gate_coherence_gpt/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_memory_gate_coherence_gpt --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}

#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_bpe_memory_gpt_format --path ../out/transformer-lm_bpe500_warmlr_format_pre2_memory/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../out/transformer-lm_bpe500_warmlr_format_pre2_memory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}

#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_bpe_memory_gate_relevance_gpt_format --path ../out/transformer-lm_bpe500_warmlr_format_pre2_memory_gate_relevance_gpt_fix/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../out/transformer-lm_bpe500_warmlr_format_pre2_memory_gate_relevance_gpt_fix --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}

# coref lm
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coref/checkpoint_best.pt \
#      --data-sufix .event-new.new  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coref --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coref/checkpoint_best.pt \
#      --data-sufix .event-new.new  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coref --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coref/checkpoint_best.pt \
#      --data-sufix .event-new.new  --model-overrides="{'use_attn': False}" \
#      --batch-size 1 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2_coref --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 250 --exp-id ${para}



# discourse lm
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptdstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptdstory/checkpoint_best.pt \
#      --data-sufix .promptdstory-None.promptdstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptdstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_discourse data-bin/writingPrompts-promptdstory_discourse_bpe500  \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptdstory_discourse_two/checkpoint_last.pt \
#      --data-sufix .promptdstory-None.promptdstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptdstory_discourse_two --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 300 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=1 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_discourse data-bin/writingPrompts-promptdstory_discourse_bpe500  \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptdstory_discourse/checkpoint_best.pt \
#      --data-sufix .promptdstory-None.promptdstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptdstory_discourse --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 300 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_discourse data-bin/writingPrompts-promptdstory_discourse_bpe500  \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptdstory_discourse_two_01/checkpoint_best.pt \
#      --data-sufix .promptdstory-None.promptdstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptdstory_discourse_two_01 --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 300 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_discourse data-bin/writingPrompts-promptdstory_discourse_bpe500  \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptdstory_discourse_two_05/checkpoint_last.pt \
#      --data-sufix .promptdstory-None.promptdstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptdstory_discourse_two_05 --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 300 --exp-id ${para}
#    for exp in `seq 1 5`
#    do
#        echo 'exp' + ${exp}
#        CUDA_VISIBLE_DEVICES=5 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#          --arch transformer_lm_gpt_bpe_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2/checkpoint_best.pt \
#          --batch-size 32 --beam 1 --sampling --sampling-topk ${para} --nbest 1 \
#          --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2 --quiet \
#          --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#          --use-context True --min-len 150 --max-len-b 250 --exp-id ${exp}
#    done
#    for exp in `seq 1 5`
#    do
#        echo 'exp' + ${exp}
#        CUDA_VISIBLE_DEVICES=6 fairseq-generate --task language_modeling_bpe data-bin/writingPrompts-event2story_bpe500 \
#          --arch transformer_lm_gpt_bpe_format --path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2/checkpoint_best.pt \
#          --batch-size 32 --beam 1 --sampling --sampling-topk 40 --temperature ${para} --nbest 1 \
#          --results-path ../fire_data/running/transformer-lm_bpe500_warmlr_format_pre2 --quiet \
#          --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#          --use-context True --min-len 150 --max-len-b 250 --exp-id ${exp}
#    done

# prompt2allen lm
# prompt2abs lm
#CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-prompt2abs_bpe500 \
#  --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/prompt2abs/checkpoint_best.pt \
#  --data-sufix .promptabs-None.promptabs --truncate \
#  --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#  --results-path ../fire_data/running/prompt2abs --quiet \
#  --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#  --use-context True --min-len 10 --max-len-b 250 --exp-id ${para}
#CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-prompt2abs_bpe500 \
#  --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/prompt2abs/checkpoint_best.pt \
#  --data-sufix .promptabs-None.promptabs --truncate \
#  --batch-size 32 --beam 1 --sampling --sampling-topk 20 --temperature ${para} --nbest 1 \
#  --results-path ../fire_data/running/prompt2abs --quiet \
#  --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#  --use-context True --min-len 10 --max-len-b 250 --exp-id 1
#CUDA_VISIBLE_DEVICES=2 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-prompt2abs_bpe500 \
#  --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/prompt2abs/checkpoint_best.pt \
#  --data-sufix .promptabs-None.promptabs --truncate \
#  --batch-size 32 --beam 1 --sampling --sampling-topk ${para} --nbest 1 \
#  --results-path ../fire_data/running/prompt2abs --quiet \
#  --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#  --use-context True --min-len 10 --max-len-b 250 --exp-id 1
#CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-prompt2abs_bpe500 \
#  --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/prompt2abs/checkpoint_best.pt \
#  --data-sufix .promptabs-None.promptabs --truncate \
#  --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#  --results-path ../fire_data/running/prompt2abs --quiet \
#  --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#  --use-context True --min-len 10 --max-len-b 250 --exp-id 1 --no-repeat-ngram-size ${para}
#CUDA_VISIBLE_DEVICES=2 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-prompt2abs_bpe500 \
#  --arch transformer_lm_gpt_bpe_format  --path ../out/prompt2abs_pre/checkpoint_best.pt \
#  --data-sufix .promptabs-None.promptabs --truncate \
#  --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#  --results-path ../out/prompt2abs_pre --quiet \
#  --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#  --use-context True --min-len 10 --max-len-b 250 --exp-id  ${para}

#CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-prompt2abs_bpe500 \
#  --arch transformer_lm_bpe_memory_gate_gpt_format  --path ../out/prompt2abs_memory_gate/checkpoint_best.pt \
#  --data-sufix .promptabs-None.promptabs --truncate \
#  --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#  --results-path ../out/prompt2abs_memory_gate --quiet \
#  --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#  --use-context True --min-len 10 --max-len-b 250 --exp-id ${para}


# prompt2keyword lm
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-prompt2keyword_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/prompt2keyword/checkpoint_best.pt \
#      --data-sufix .promptkeyword-None.promptkeyword --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2keyword --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 10 --max-len-b 250 --exp-id ${para}

# absstory lm
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-absstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/absstory/checkpoint_best.pt \
#      --data-sufix .absstory-None.absstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/absstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 450 --max-len-b 550 --exp-id ${para}

# promptabsstory lm
#    CUDA_VISIBLE_DEVICES=6 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=6 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory/checkpoint45.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory/checkpoint45.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/40hyp.txt.1.topk20.t1.0.for2input

#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input

#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 10 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input


#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs_015_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_015_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs_015_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_015_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs_050_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_050_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs_050_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_050_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input
#    CUDA_VISIBLE_DEVICES=6 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptabsstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptabsstory_mask_abs_015_abs/checkpoint_best.pt \
#      --data-sufix .promptabsstory-None.promptabsstory --truncate  \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_015_abs --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/out/prompt2abs_pre/3hyp.txt.1.min10.0.max250.topk20.t1.0.for2input

#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis/checkpoint_last.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=1 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis/checkpoint_last.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input
#    CUDA_VISIBLE_DEVICES=7 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis/checkpoint_best.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/out/prompt2abs_pre/3hyp.txt.1.min10.0.max250.topk20.t1.0.for2input
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s/checkpoint_best.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s/checkpoint_best.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input
#    CUDA_VISIBLE_DEVICES=6 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s/checkpoint_best.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/out/prompt2abs_pre/3hyp.txt.1.min10.0.max250.topk20.t1.0.for2input

#    CUDA_VISIBLE_DEVICES=6 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s_3010/checkpoint_best.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s_3010 --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input

#    CUDA_VISIBLE_DEVICES=7 fairseq-generate --user-dir coherence_story --task language_modeling_bpe_coref_dis data-bin/writingPrompts-prompabsdstory_coref_discourse_bpe500 \
#      --arch transformer_lm_bpe_gpt_format_discourse  --path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s_3010_fine/checkpoint_best.pt \
#      --data-sufix .promptabsdstory-None.promptabsdstory --truncate  --model-overrides="{'use_attn': False}" \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptabsstory_mask_abs_coref_dis_015s_3010_fine --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.for2input


# promptkeywordstory language model (LM) generation
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptkeywordstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptkeywordstory/checkpoint_best.pt \
#      --data-sufix .promptkeywordstory-None.promptkeywordstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptkeywordstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptkeywordstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptkeywordstory/checkpoint_best.pt \
#      --data-sufix .promptkeywordstory-None.promptkeywordstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptkeywordstory --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2keyword/6hyp.txt.1.topk20.t1.0.for2input
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptkeywordstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptkeywordstory_mask_keyword/checkpoint_best.pt \
#      --data-sufix .promptkeywordstory-None.promptkeywordstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptkeywordstory_mask_keyword --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --user-dir coherence_story --task language_modeling_bpe data-bin/writingPrompts-promptkeywordstory_bpe500 \
#      --arch transformer_lm_gpt_bpe_format  --path ../fire_data/running/promptkeywordstory_mask_keyword/checkpoint_best.pt \
#      --data-sufix .promptkeywordstory-None.promptkeywordstory --truncate \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/promptkeywordstory_mask_keyword --quiet \
#      --skip-invalid-size-inputs-valid-test --tokens-per-sample 1024 --sample-break-mode eos \
#      --use-context True --min-len 150 --max-len-b 550 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2keyword/6hyp.txt.1.topk20.t1.0.for2input


# allen2story translation model (no commands retained in this section)

# abs2story translation model
#    CUDA_VISIBLE_DEVICES=0 python generate.py --user-dir coherence_story --task translation_bpe data-bin/writingPrompts-abs2story_bpe500 \
#      --arch transformer_prompt_to_event_bpe  --path ../fire_data/running/abs2story_small_ls0/checkpoint_best.pt \
#      --src-dict-type bpe --tgt-dict-type bpe --truncate --left-pad-source False \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/abs2story_small_ls0 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --exp-id ${para}
#    CUDA_VISIBLE_DEVICES=2 python generate.py --user-dir coherence_story --task translation_bpe data-bin/writingPrompts-abs2story_bpe500 \
#      --arch transformer_prompt_to_event_bpe  --path ../fire_data/running/abs2story_small_ls0/checkpoint_best.pt \
#      --src-dict-type bpe --tgt-dict-type bpe --truncate --left-pad-source False \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/abs2story_small_ls0 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --exp-id ${para} \
#      --external-data --external-src-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0 \
#      --external-tgt-path /home/rickwwang/project_research/fire_data/running/prompt2abs/14hyp.txt.1.topk20.t1.0.tgt

# prompt to event model
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_prompt_to_event --path ../out/prompt2srl2_500/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 40 --nbest 1 \
#      --results-path ../out/prompt2srl2_500 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para}
#    for exp in `seq 1 5`
#    do
#        echo 'exp' + ${exp}
#        CUDA_VISIBLE_DEVICES=7 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#          --arch transformer_prompt_to_event --path ../out/prompt2srl2_500/checkpoint_best.pt \
#          --batch-size 32 --beam 1 --sampling --sampling-topk ${para} --nbest 1 \
#          --results-path ../out/prompt2srl2_500 --quiet \
#          --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${exp}
#    done
#    CUDA_VISIBLE_DEVICES=6 fairseq-generate --task translation data-bin/writingPrompts-prompt2event_500 \
#      --arch transformer_prompt_to_event --path ../fire_data/500version/prompt2event_500/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk ${para} --nbest 1 \
#      --results-path ../fire_data/500version/prompt2event_500 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id 1
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_prompt_to_event --path ../out/prompt2srl2_500/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk ${para} --nbest 1 \
#      --results-path ../out/prompt2srl2_500 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id 1 --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_prompt_to_event --path ../out/prompt2srl2_500_wo_smooth_d1/checkpoint35.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../out/prompt2srl2_500_wo_smooth_d1 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_prompt_to_event --use-verb-attention --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss/checkpoint25.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss2_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss2/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss2 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=1 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss3_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss3/checkpoint40.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss3 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=0 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss4_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss4/checkpoint30.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss4 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=2 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss32_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss32/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss32 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss322_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss322/checkpoint20.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss322 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=3 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss12/checkpoint20.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss12 --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=5 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500 \
#      --arch transformer_verb_loss323_prompt_to_event --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss323_first/checkpoint20.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_verb_loss323_first --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id ${para} --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=4 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500_nosympol \
#      --arch transformer_prompt_to_event_big --path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_gpt2_nosympol/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 --temperature ${para} \
#      --results-path ../fire_data/running/prompt2srl2_500_wo_smooth_d1_gpt2_nosympol --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id 11 --no-repeat-ngram-size 6
#    CUDA_VISIBLE_DEVICES=6 fairseq-generate --task translation data-bin/writingPrompts-prompt2srl2_500_nosympol \
#      --arch transformer_prompt_to_event_big --path ../fire_data/running/prompt2srl2_500_gpt2_nosympol/checkpoint_best.pt \
#      --batch-size 32 --beam 1 --sampling --sampling-topk 20 --nbest 1 --temperature ${para} \
#      --results-path ../fire_data/running/prompt2srl2_500_gpt2_nosympol --quiet \
#      --skip-invalid-size-inputs-valid-test  --min-len 150 --max-len-b 250 --unkpen 1 --exp-id 11 --no-repeat-ngram-size 6

done
