#!/bin/bash
export PYTHONUNBUFFERED=1

# for ff_attrs in mlp.up_proj mlp.gate_proj mlp.down_proj self_attn.q_proj self_attn.k_proj self_attn.v_proj self_attn.o_proj; do
# for ff_attrs in attn.q_proj attn.k_proj attn.v_proj attn.out_proj mlp.fc_out mlp.fc_in; do

# model=qwen-7b
# model_name=Qwen-7B
# layer_idx=0,32
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.w1

# model=qwen1.5-7b
# model_name=qwen1.5-7b
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=qwen2-7b
# model_name=qwen2-7b
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=llama3-8b
# model_name=Meta-Llama-3-8B
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=llama2-7b
# model_name=Llama-2-7b-ms
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=llama2-7b-chat
# model_name=llama-2-7b-chat-ms
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj



# Per-run log directory, named <date>_<model>.
# NOTE(review): $model is only assigned in the commented-out presets above, so
# as written the directory name ends in a bare underscore — confirm intent.
new_date=$(date +%Y-%m-%d)_${model:-}
# -p already succeeds when the directory exists; no need for an [ -d ] guard.
mkdir -p "log/$new_date"

# Dataset selection: exactly one data_type/data_path pair should be active.
# data_type=counterfact
# data_path=../../EasyEdit/dataset/KnowEdit-ms/benchmark_wiki_counterfact_test_cf.json
# data_type=zsre
# data_path=../../EasyEdit/dataset/KnowEdit-ms/benchmark_ZsRE_ZsRE-test-all.json
data_type=recent
data_path=../../EasyEdit/dataset/KnowEdit-ms/benchmark_wiki_recent_recent_test.json
# 3090
# source activate ke2torch23cu121
# Shared model cache locations; presumably read by kn.py — TODO confirm.
export HUGGINGFACE_CACHE=/share/huggingface/
cache_dir=/share/lyc/


# Per-process settings passed straight through to kn.py.
batch_size=1
prob_type=target_new
next_token=answer_next_token  # answer_next_token,argmax_next_token
batch=100
# Number of examples handled by each GPU shard: GPU i gets [i*batch, (i+1)*batch).

# NOTE(review): legacy conda invocation; assumes an env named "mamba" exists.
source activate mamba
model_name=falcon-mamba-7b-instruct
# layer_idx is a comma-separated range string consumed by kn.py — TODO confirm
# whether the end (64) is inclusive or exclusive.
layer_idx=0,64
transformer_layers_attr=backbone.layers
# For each Mamba projection module, fan the workload out over 4 GPUs:
# GPU i probes examples [i*batch, (i+1)*batch) in the background, one
# process per GPU, then barrier (`wait`) before the next ff_attrs.
for ff_attrs in mixer.in_proj mixer.x_proj mixer.dt_proj mixer.out_proj; do
    for i in $(seq 0 3); do
        start_idx=$((i * batch))
        end_idx=$((start_idx + batch))
        # Build the run tag once; it is both the console line and the log name.
        run_tag="$i-$next_token-$model_name-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs"
        echo "$run_tag"
        CUDA_VISIBLE_DEVICES=$i python kn.py \
            --cache_dir "$cache_dir" \
            --next_token "$next_token" \
            --model_name "$model_name" \
            --start_idx "$start_idx" \
            --end_idx "$end_idx" \
            --batch_size "$batch_size" \
            --data_type "$data_type" \
            --data_path "$data_path" \
            --transformer_layers_attr "$transformer_layers_attr" \
            --ff_attrs "$ff_attrs" \
            --prob_type "$prob_type" \
            --layer_idx "$layer_idx" \
            --steps "$batch_size" \
            > "log/$new_date/$run_tag-1.log" 2>&1 &
    done
    # Barrier: all 4 GPU shards must finish before moving to the next module.
    # NOTE(review): wait discards child exit codes; a failed shard is only
    # visible in its log file.
    wait
done
wait

# model_name=llama2-7b-chat
# layer_idx=0,32
# transformer_layers_attr=model.layers

# for ff_attrs in mlp.up_proj mlp.down_proj; do
#     for i in $(seq 0 3); do
#         start_idx=$(($i * $batch))
#         end_idx=$(($i * $batch + $batch))
#         echo $i-$next_token-$model_name-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs
#         CUDA_VISIBLE_DEVICES=$i python kn.py \
#             --cache_dir $cache_dir \
#             --next_token $next_token \
#             --model_name $model_name \
#             --start_idx $start_idx \
#             --end_idx  $end_idx \
#             --batch_size $batch_size \
#             --data_type $data_type \
#             --data_path $data_path \
#             --transformer_layers_attr $transformer_layers_attr \
#             --ff_attrs $ff_attrs \
#             --prob_type $prob_type \
#             --layer_idx $layer_idx \
#             --steps $batch_size \
#             > log/$new_date/$i-$next_token-$model_name-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs-1.log 2>&1 &
#     done
#     wait
# done
# wait

# model_name=Qwen2-7B-Instruct
# layer_idx=0,28
# transformer_layers_attr=model.layers

# for ff_attrs in mlp.up_proj mlp.down_proj; do
#     for i in $(seq 0 3); do
#         start_idx=$(($i * $batch))
#         end_idx=$(($i * $batch + $batch))
#         echo $i-$next_token-$model_name-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs
#         CUDA_VISIBLE_DEVICES=$i python kn.py \
#             --cache_dir $cache_dir \
#             --next_token $next_token \
#             --model_name $model_name \
#             --start_idx $start_idx \
#             --end_idx  $end_idx \
#             --batch_size $batch_size \
#             --data_type $data_type \
#             --data_path $data_path \
#             --transformer_layers_attr $transformer_layers_attr \
#             --ff_attrs $ff_attrs \
#             --prob_type $prob_type \
#             --layer_idx $layer_idx \
#             --steps $batch_size \
#             > log/$new_date/$i-$next_token-$model_name-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs-1.log 2>&1 &
#     done
#     wait
# done
# wait

# model_name=gpt-j-6b
# layer_idx=0,28
# transformer_layers_attr=transformer.h
# for ff_attrs in mlp.fc_out mlp.fc_in; do
#     for i in $(seq 0 3); do
#         start_idx=$(($i * $batch))
#         end_idx=$(($i * $batch + $batch))
#         echo $i-$next_token-$model_name-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs
#         CUDA_VISIBLE_DEVICES=$i python kn.py \
#             --cache_dir $cache_dir \
#             --next_token $next_token \
#             --model_name $model_name \
#             --start_idx $start_idx \
#             --end_idx  $end_idx \
#             --batch_size $batch_size \
#             --data_type $data_type \
#             --data_path $data_path \
#             --transformer_layers_attr $transformer_layers_attr \
#             --ff_attrs $ff_attrs \
#             --prob_type $prob_type \
#             --layer_idx $layer_idx \
#             --steps $batch_size \
#             > log/$new_date/$i-$next_token-$model_name-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs-1.log 2>&1 &
#     done
#     wait
# done
# wait
