#!/bin/bash
export PYTHONUNBUFFERED=1

# 1. Select the model and layer range (copy the matching preset from the
#    commented-out configurations at the bottom of this file).
model=gpt-j-6b
model_name=gpt-j-6b
layer_idx=0,28
transformer_layers_attr=transformer.h
ff_attrs=mlp.fc_in

# Per-run log directory, e.g. log/2024-01-01_gpt-j-6b.
new_date=$(date +%Y-%m-%d)_${model}
# mkdir -p is a no-op when the directory already exists; no pre-check needed.
mkdir -p "log/$new_date"

# 2. Dataset
data_type=counterfact
data_path=../../EasyEdit/dataset/KnowEdit-ms/benchmark_wiki_counterfact_test_cf.json
# 3090 # conda environment
# NOTE(review): `source activate` is the legacy conda invocation; modern installs
# prefer `conda activate`. Kept as-is to match this cluster's setup — confirm.
source activate ke2torch23cu121
export HUGGINGFACE_CACHE=/share/huggingface/
# 3. Directory where gradient-attribution scores are cached.
cache_dir=./

# Defaults
batch_size=20
prob_type=target_new
next_token=answer_next_token  # answer_next_token,argmax_next_token

# 4. Data parallelism: 885 examples are split evenly across 3 GPUs
#    (devices 1..3, 295 examples each; slice is [start_idx, end_idx)).
for i in {1..3}; do
    start_idx=$(( (i - 1) * 295 ))
    end_idx=$(( i * 295 ))
    # Single tag string shared by the progress echo and the log filename,
    # so the two can never drift apart.
    run_tag="$i-$next_token-$model-$data_type-$layer_idx-$start_idx-$end_idx-bs$batch_size-$prob_type-$ff_attrs"
    echo "$run_tag"
    # NOTE(review): --steps is tied to batch_size here — confirm that is intended
    # rather than a separate integration-steps hyperparameter.
    CUDA_VISIBLE_DEVICES=$i python kn.py \
        --cache_dir "$cache_dir" \
        --next_token "$next_token" \
        --model_name "$model_name" \
        --start_idx "$start_idx" \
        --end_idx "$end_idx" \
        --batch_size "$batch_size" \
        --data_type "$data_type" \
        --data_path "$data_path" \
        --transformer_layers_attr "$transformer_layers_attr" \
        --ff_attrs "$ff_attrs" \
        --prob_type "$prob_type" \
        --layer_idx "$layer_idx" \
        --steps "$batch_size" \
        > "log/$new_date/$run_tag-1.log" 2>&1 &
done
# Barrier: block until all three background runs have finished.
wait

# model=gpt2
# model_name=gpt2
# layer_idx=0,12
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.c_fc


# model=gpt2-medium
# model_name=gpt2-medium
# layer_idx=0,24
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.c_fc


# model=gpt2-xl
# model_name=gpt2-xl
# layer_idx=0,48
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.c_fc


# model=gpt-neo-1.3b
# model_name=gpt-neo-1.3b
# layer_idx=0,24
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.c_fc


# model=gpt-neo-2.7b
# model_name=gpt-neo-2.7b
# layer_idx=0,32
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.c_fc


# model=gpt-neo-125m
# model_name=gpt-neo-125m
# layer_idx=0,12
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.c_fc

# model=qwen-7b
# model_name=Qwen-7B
# layer_idx=0,32
# transformer_layers_attr=transformer.h
# ff_attrs=mlp.w1

# model=qwen1.5-7b
# model_name=qwen1.5-7b
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=qwen2-7b
# model_name=qwen2-7b
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=llama3-8b
# model_name=Meta-Llama-3-8B
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=llama2-7b
# model_name=Llama-2-7b-ms
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

# model=llama2-7b-chat
# model_name=llama-2-7b-chat-ms
# layer_idx=0,32
# transformer_layers_attr=model.layers
# ff_attrs=mlp.up_proj

