# Environment setup.
# source activate base # autodl (alternative env for the autodl platform)
source activate ke2torch23cu121 # conda env on the 3090 machine
# export HUGGINGFACE_CACHE=/root/autodl-fs/huggingface/ # autodl cache location
export HUGGINGFACE_CACHE=/share/huggingface/ # shared HuggingFace cache
export PYTHONUNBUFFERED=1 # unbuffered python stdout so the logs stream live

method=ICE
DATE=$(date +"%Y-%m-%d")
# autodl
# Per-date log root, with one subdirectory per probability-distance variant.
LOG_ROOT="./${method}-log/${DATE}"
LOG_DIRS=("${LOG_ROOT}" "${LOG_ROOT}/${method}-js" "${LOG_ROOT}/${method}-kl" "${LOG_ROOT}/${method}-wasserstein")

# mkdir -p is idempotent, so no separate existence check is needed.
for log_dir in "${LOG_DIRS[@]}"; do
    mkdir -p "${log_dir}"
done

# # KnowEdit datasets
# declare -A datasets=(
#     ["zsre"]="../dataset-ke/KnowEdit/benchmark/ZsRE/ZsRE-test-all.json"
#     ["recent"]="../dataset-ke/KnowEdit/benchmark/wiki_recent/recent_test.json"
#     ["counterfact"]="../dataset-ke/KnowEdit/benchmark/wiki_counterfact/test_cf.json"
#     ["wikibio"]="../dataset-ke/KnowEdit/benchmark/WikiBio/wikibio-test-all.json"
# )


# ICE KnowEdit datasets: benchmark name -> path to its test JSON file.
declare -A datasets
datasets["zsre"]="./dataset-ke/${method}/zsre.json"
# wikidata_recent is comparatively hard; scores on it tend to be the lowest.
datasets["recent"]="./dataset-ke/${method}/wikidata_recent.json"
datasets["counterfact"]="./dataset-ke/${method}/wikidata_counterfact.json"
datasets["wikibio"]="./dataset-ke/${method}/wikibio.json"
# --test_generation: whether to test generation and compute the fluency metric
# --save_gen_sentence: whether to save generated sentences, to inspect issues
#   such as repetitive output after editing
# --single_edit: defaults to false; passing the flag sets it to true
# for loss_div_alpha in 0.4 0.6 0.8; do
# gpu_id=0
# NOTE(review): the numbers after each model appear to be candidate edit
# layers (single layers / ranges) — compare the gpt2-xl layer loop at the
# bottom of this file. Confirm before relying on them.
# model=gpt2-xl 0/13-17/8/45-47
# model=gpt-j-6b 21/3-8/25-27
# model=llama2-7b-chat 27/21/4-8/29-31
# model=llama3-8b-instruct 29/21/4-8
# model=Qwen-7B-Chat # 5/31/4-8/29-31
# model=Qwen1.5-7B-Chat # 5/27/4-8
# model=Qwen2-7B-Instruct # 5/27/4-8/25-27
# model=Qwen2.5-7B-Instruct # 23/5/27/4-8/25-27

# Fixed hyperparameters for this sweep (passed straight through to
# examples/run_knowedit_llama2.py via the flags of the same names).
loss_div_alpha=1.0      # value of --loss_div_alpha; presumably weights the divergence loss term — confirm in the python script
early_stop_steps=5      # value of --early_stop_steps (the "stop-v2" tag in cnt)
method_type=wasserstein # probability distance (--p_dist); other options seen below: kl, js
wst_type=top_k:20       # Wasserstein variant; other options seen below: top_k:100, top_k:1000, mean
module=origin           # edited module (--module); other options seen below: mlp, attn, all

# Sweep over (model, layers); for each combination, launch all four datasets
# in parallel, one per GPU (gpu_id 0..3), then wait for them to finish.
# Alternative sweeps kept for reference:
# for method_type in kl js; do
# for wst_type in top_k:20 top_k:100 top_k:1000 mean; do
# for module in mlp attn all; do
# for model in gpt-j-6b llama2-7b-chat llama3-8b-instruct Qwen-7B-Chat Qwen1.5-7B-Chat Qwen2-7B-Instruct Qwen2.5-7B-Instruct; do
for model in gpt-j-6b llama2-7b-chat llama3-8b-instruct Qwen-7B-Chat; do
# for model in Qwen1.5-7B-Chat Qwen2-7B-Instruct Qwen2.5-7B-Instruct; do
    # for layers in 5 21 23 27 29 31 25,26,27 29,30,31 4,5,6,7,8; do
    # for layers in 21 22 23 24 25 26 27 28 29 30 31; do
    for layers in 22 24 25 26 28 30; do
        gpu_id=0
        # Run tag encoding the full configuration; reused in every output path.
        cnt="fast-ly:${layers}-tg_ctx-${method_type}-${wst_type}:${loss_div_alpha}-stop-v2:${early_stop_steps}-${module}-single_edit"
        for datatype in "${!datasets[@]}"; do
            data_dir="${datasets[$datatype]}"
            run_log_dir="./${method}-log/${DATE}/${method}-${method_type}"
            run_log_file="${run_log_dir}/${model}-${datatype}-${gpu_id}-${cnt}.log"
            # Guard: the LOG_DIRS list at the top only pre-creates js/kl/
            # wasserstein subdirs; if method_type ever changes, the log
            # redirection below would otherwise fail on a missing directory.
            mkdir -p "${run_log_dir}"
            echo "${DATE}/${method}-${method_type}/${model}-${datatype}-${gpu_id}-${cnt}"
            CUDA_VISIBLE_DEVICES="${gpu_id}" python examples/run_knowedit_llama2.py \
                --editing_method="${method}" \
                --layers="${layers}" \
                --module "${module}" \
                --wst_type="${wst_type}" \
                --loss_div_alpha="${loss_div_alpha}" \
                --early_stop_steps="${early_stop_steps}" \
                --p_dist="${method_type}" \
                --single_edit \
                --datatype="${datatype}" \
                --hparams_dir="./hparams/${method}/${model}.yaml" \
                --objective_optimization=target_new_with_context \
                --data_dir="${data_dir}" \
                --metrics_save_dir="./${method}-results/${model}/${method}-${method_type}-${DATE}-${cnt}" \
                --gen_sentence_save_dir="./${method}-outputs/${model}/${method}-${method_type}-${DATE}-${cnt}" \
                --pre_file="./pre_edit/${model}_${datatype}_pre_edit.json" \
                > "${run_log_file}" 2>&1 &
            gpu_id=$((gpu_id + 1))  # next dataset goes to the next GPU
        done
        # Barrier: all dataset jobs for this (model, layers) must finish
        # before the next combination claims the GPUs.
        wait
    done
done

# for model in gpt2-xl; do
#     for layers in 0 8 13,14,15,16,17 45,46,47; do
#         gpu_id=0
#         cnt="fast-ly:${layers}-tg_ctx-${method_type}-${wst_type}:${loss_div_alpha}-stop-v2:${early_stop_steps}-${module}-single_edit"
#         for datatype in "${!datasets[@]}"; do
#             data_dir="${datasets[$datatype]}"
#             echo "${DATE}/${method}-${method_type}/${model}-${datatype}-${gpu_id}-${cnt}"
#             CUDA_VISIBLE_DEVICES=${gpu_id} python examples/run_knowedit_llama2.py \
#                 --editing_method=${method} \
#                 --layers=${layers} \
#                 --module ${module} \
#                 --wst_type=${wst_type} \
#                 --loss_div_alpha=${loss_div_alpha} \
#                 --early_stop_steps=${early_stop_steps} \
#                 --p_dist=${method_type} \
#                 --single_edit \
#                 --datatype=${datatype} \
#                 --hparams_dir=./hparams/${method}/${model}.yaml \
#                 --objective_optimization=target_new_with_context \
#                 --data_dir=${data_dir} \
#                 --metrics_save_dir=./${method}-results/${model}/${method}-${method_type}-${DATE}-${cnt} \
#                 --gen_sentence_save_dir=./${method}-outputs/${model}/${method}-${method_type}-${DATE}-${cnt} \
#                 --pre_file=./pre_edit/${model}_${datatype}_pre_edit.json \
#                 > ./${method}-log/${DATE}/${method}-${method_type}/${model}-${datatype}-${gpu_id}-${cnt}.log 2>&1 &
#             gpu_id=$(($gpu_id+1))
#         done
#         wait
#     done
# done