#!/bin/bash
# Launch knowledge-neuron localization runs (kn.py) for one model,
# one worker process per GPU. Logs go to log/<date>_<model>/.
set -euo pipefail
export PYTHONUNBUFFERED=1

# --- Model presets: uncomment exactly one block. ---
# model=gpt-j-6b
# model_name=gpt-j-6b
# layer_idx=0,28
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.fc_in

# model=qwen-7b
# model_name=qwen-7b
# layer_idx=0,32
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.w1

model=qwen1.5-7b
model_name=qwen1.5-7b
layer_idx=0,32
transformer_layers_attr=model.layers
ff_attrs=mlp.up_proj

# model=meta-llama-3-8b
# model_name=meta-llama-3-8b
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj


# Per-run log directory, namespaced by date and model.
# mkdir -p is a no-op if the directory already exists, so no existence check needed.
new_date=$(date +%Y-%m-%d)_${model}
mkdir -p "log/$new_date"

# --- Dataset: KnowEdit counterfact benchmark split. ---
data_type=counterfact
data_path=../../EasyEdit/dataset/KnowEdit-ms/benchmark_wiki_counterfact_test_cf.json

# --- Environment (autodl host): conda env + HF/model cache locations. ---
source activate base
export HUGGINGFACE_CACHE=/root/autodl-fs/
cache_dir=/root/autodl-fs/locate-knb-cache

# --- Runtime knobs passed through to kn.py. ---
batch_size=20
next_token=answer_next_token
prob_type=target_new

# Fan out one kn.py worker per GPU (devices 0-2), each on a disjoint
# 295-example slice of the dataset. Each worker's stdout+stderr is
# captured in its own log file under log/$new_date/.
chunk=295  # examples per GPU; hoisted so the slice math appears once
for i in 0 1 2; do
    start_idx=$((i * chunk))
    end_idx=$((start_idx + chunk))
    # Single source of truth for the run tag, reused for the log filename.
    run_tag="$i-$next_token-$model-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs"
    echo "$run_tag"
    CUDA_VISIBLE_DEVICES=$i python kn.py \
        --cache_dir "$cache_dir" \
        --next_token "$next_token" \
        --model_name "$model_name" \
        --start_idx "$start_idx" \
        --end_idx "$end_idx" \
        --batch_size "$batch_size" \
        --data_type "$data_type" \
        --data_path "$data_path" \
        --transformer_layers_attr "$transformer_layers_attr" \
        --ff_attrs "$ff_attrs" \
        --prob_type "$prob_type" \
        --layer_idx "$layer_idx" \
        --steps "$batch_size" \
        > "log/$new_date/$run_tag-1.log" 2>&1 &
done
# Block until all workers finish so failures surface before the script exits
# (matches the commented-out data-parallel variant below, which also waits).
wait

# i=0
# start_idx=0
# end_idx=10
# echo $i-$next_token-$model-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs
# CUDA_VISIBLE_DEVICES=$i python kn.py \
#     --cache_dir $cache_dir \
#     --next_token $next_token \
#     --model_name $model_name \
#     --start_idx $start_idx \
#     --end_idx  $end_idx \
#     --batch_size $batch_size \
#     --data_type $data_type \
#     --data_path $data_path \
#     --transformer_layers_attr $transformer_layers_attr \
#     --ff_attrs $ff_attrs \
#     --prob_type $prob_type \
#     --layer_idx $layer_idx \
#     --steps $batch_size \
#     > log/$new_date/$i-$next_token-$model-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs-1.log 2>&1 &

# Data-parallel sweep over ff_attrs and next_token variants (kept for reference)
# batch_size=20
# prob_type=target_new
# for ff_attrs in {mlp.up_proj,mlp.gate_proj}; do
#     for next_token in {argmax_next_token,answer_next_token}; do
#         for i in $(seq 0 2); do
#             echo $i-$next_token-$model-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs
#             start_idx=$(($i * 295))
#             end_idx=$(($i * 295 + 295))
#             CUDA_VISIBLE_DEVICES=$i python kn.py \
#                 --cache_dir $cache_dir \
#                 --next_token $next_token \
#                 --model_name $model_name \
#                 --start_idx $start_idx \
#                 --end_idx  $end_idx \
#                 --batch_size $batch_size \
#                 --data_type $data_type \
#                 --data_path $data_path \
#                 --transformer_layers_attr $transformer_layers_attr \
#                 --ff_attrs $ff_attrs \
#                 --prob_type $prob_type \
#                 --layer_idx $layer_idx \
#                 --steps $batch_size \
#                 > log/$new_date/$i-$next_token-$model-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs-1.log 2>&1 &
#         done
#         wait
#     done
# done