#!/bin/bash
#SBATCH --gpus=4
#SBATCH -x paraai-n32-h-01-agent-[1-33],paraai-n32-h-01-agent-[48-56],paraai-n32-h-01-agent-[63-197]
export PYTHONUNBUFFERED=1

# Per-run log directory name: date plus SLURM job id (empty suffix when run
# outside SLURM, e.g. 2024-01-01_).
new_date=$(date +%Y-%m-%d)_${SLURM_JOB_ID}
# mkdir -p is idempotent, so no [ ! -d ] guard is needed.
mkdir -p "log-test/$new_date"

# 0 445 885
# [0, 326, 652, 978, 1304]

# --- Model / dataset selection (alternatives kept commented for quick switching) ---
model_name=Meta-Llama-3-8B-Instruct
# model_name=gpt-j-6b
# model_name=Llama-2-7b-hf # EasyEdit also uses this model
# model_name=Llama-2-7b-ms
data_type=counterfact
# data_path=../../EasyEdit/dataset/KnowEdit-ms/benchmark_wiki_counterfact_test_cf.json
data_path=../../EasyEdit/dataset/KnowEdit-ms/wiki_counterfact_test_cf_prompt10_v8.json
# data_type=zsre
# data_path=../../EasyEdit/dataset/KnowEdit-ms/benchmark_ZsRE_ZsRE-test-all.json

# --- Cluster modules / conda environment (host-specific variants commented) ---
# module load compilers/cuda/12.1
# module load cudnn/8.8.1.3_cuda12.x
# module load compilers/gcc/11.3.0
# source activate ke2torch23cu121
# source activate ke
source activate torch23py310

# --- HuggingFace cache location (differs per host; read by kn.py — TODO confirm) ---
# export HUGGINGFACE_CACHE=/home/bingxing2/public/models/llama3/ 
# export HUGGINGFACE_CACHE=/home/bingxing2/public/models/llama2/ 
# export HUGGINGFACE_CACHE=/home/bingxing2/home/scx7avs/lyc/huggingface/ 
# export HUGGINGFACE_CACHE=/share/huggingface/
export HUGGINGFACE_CACHE=/root/huggingface/

batch_size=2
# Sweep: probability target x attention/MLP projection x 3 data shards.
# Each shard runs on its own GPU; `wait` is a barrier before the next config.
for prob_type in target_new ground_truth llm_answer; do
    for ff_attrs in self_attn.q_proj self_attn.k_proj self_attn.v_proj mlp.up_proj mlp.down_proj mlp.gate_proj; do
        for i in 0 1 2; do
            echo "$i $model_name $data_type $batch_size"
            start_idx=$((i * 295))
            end_idx=$((i * 295 + 2))
            # Include prob_type and ff_attrs in the log name: the old name
            # (hard-coded "up_proj-llama3-8b") only varied by shard/range, so
            # every inner iteration overwrote the same three log files.
            log_file="log-test/$new_date/$i-$prob_type-$ff_attrs-$model_name-$data_type-$start_idx-$end_idx-bs$batch_size.log"
            CUDA_VISIBLE_DEVICES=$i python kn.py \
                --model_name "$model_name" \
                --start_idx "$start_idx" \
                --end_idx "$end_idx" \
                --batch_size "$batch_size" \
                --data_type "$data_type" \
                --data_path "$data_path" \
                --transformer_layers_attr model.layers \
                --ff_attrs "$ff_attrs" \
                --prob_type "$prob_type" \
                --layer_idx 10,11 \
                --steps "$batch_size" \
                > "$log_file" 2>&1 &
        done
        wait  # barrier: all 3 GPU shards must finish before the next config
    done
done

# Already completed
# layers 20,32

# v1: no_prompts means neither the prompt nor the generated generate_prompt 10 is used
# Failed with OOM or disk space full
# i=2
# skip=206 # 165 not finished computing yet; 165+41
# i=1
# skip=147 # 120+7+20
# i=0
# # skip=68+19+119+63
# skip=269
# batch_size=5

# echo "$i $model_name $data_type $batch_size"
# start_idx=$(($i * 295 + $skip))
# # start_idx=$(($i * 295))
# end_idx=$(($i * 295 + 295))
# CUDA_VISIBLE_DEVICES=$i python kn.py \
#     --model_name $model_name \
#     --start_idx $start_idx \
#     --end_idx  $end_idx \
#     --batch_size $batch_size \
#     --data_type $data_type \
#     --data_path $data_path \
#     --transformer_layers_attr model.layers \
#     --ff_attrs mlp.up_proj \
#     --use_prompt \
#     --prob_type ground_truth \
#     --layer_idx 0,32 \
#     --steps 20 \
#     > log/$new_date/$i-up_proj-llama3-8b-$data_type-$start_idx-$end_idx-bs$batch_size-use-prompt-1.log 2>&1 &
