#!/usr/bin/env bash
# Paraphrase-attack a watermarked corpus, then score watermark detection.
# Step 1 (attack): rewrite watermarked text with a paraphrase model.
# Step 2 (detect): score the attacked text against human references.
set -euo pipefail

# Model paths
foundation_model=/data2/pretrain/meta-llama/Llama-2-7b-hf
# Distilled watermark model (unigram scheme, gamma=0.5, delta=2.0).
amateur_model=ckpt/meta-llama-Llama-2-7b-hf/google-gemma-2-2b-it-sampling-watermark-distill-unigram_gamma0.5_delta2.0
origin_model=/data2/pretrain/google/gemma-2-2b-it
paraphrase_model=/data2/pretrain/google/gemma-2-2b-it
dataset=realnews

# Tunable parameters
baselines=unigram      # watermarking baseline scheme
bsz=32                 # batch size for generation/detection
closed_form=False      # "True" -> closed-form attack with a KL threshold
prompt_id=4            # paraphrase prompt template id
kl_threshold=1.        # KL threshold (closed-form) or coefficient (otherwise)

# Step 1: paraphrase-attack the watermarked text.
# Build the command as an array (not a flat string) so paths containing
# spaces survive intact, and so every expansion can be safely quoted.
# Use the declared tunables ${dataset}/${baselines} instead of the
# hard-coded literals they duplicated.
cmd=(
    python3 watermark_generate.py
    --wm_path output_text.json
    --output_path output_attack_text.json
    --model_name "${foundation_model}"
    --dataset "${dataset}"
    --baseline "${baselines}"
    --paraphrase_model "${paraphrase_model}"
    --prompt_id "${prompt_id}"
    --amateur_model "${amateur_model}"
    --origin_model "${origin_model}"
    --do_attack
    --batch_size "${bsz}"
)
# Closed-form attack takes kl_threshold as an explicit KL bound; the
# default (contrastive) attack reuses the same value as its coefficient.
if [[ "${closed_form}" == "True" ]]; then
    cmd+=(--closed_form --kl_threshold "${kl_threshold}")
else
    cmd+=(--coef "${kl_threshold}")
fi

# Log the exact, shell-quoted invocation before running it.
printf '%q ' "${cmd[@]}"
printf '\n'
"${cmd[@]}"

# Step 2: detect the watermark in the attacked text, scored against the
# human-written references for the same dataset. gamma/delta must match
# the scheme the amateur model was distilled with (gamma=0.5, delta=2.0;
# see the amateur_model path). Uses ${baselines} rather than repeating
# the literal scheme name.
python3 watermark_generate.py \
    --wm_path output_attack_text.json \
    --unwm_path "dataset/${dataset}/human.json" \
    --output_path output_score.json \
    --model_name "${foundation_model}" \
    --dataset "${dataset}" \
    --baseline "${baselines}" \
    --gamma 0.5 \
    --delta 2.0 \
    --do_detect \
    --batch_size "${bsz}"
