# Disable Weights & Biases logging; CUDA_LAUNCH_BLOCKING=1 makes CUDA kernel
# launches synchronous, which helps debugging but slows training.
export WANDB_MODE=disabled
export CUDA_LAUNCH_BLOCKING=1
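# Dataset name, base LLaMA checkpoint, and input/output paths.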
DATASET=Instruments
BASE_MODEL=/datain/v-yinju/llama-7b
INTER=/datain/v-yinju/LLMBased_Multimodal_RS/Data/Musical_Instruments/Musical_Instruments.inters.numerical.json
FEATURE=/datain/v-yinju/LLMBased_Multimodal_RS/Data/Musical_Instruments/Musical_Instruments.features.numerical.json
INDEX=/datain/v-yinju/LLMBased_Multimodal_RS/Data/Musical_Instruments/Rqvae/Text/indices.json
OUTPUT_DIR=/datain/v-yinju/LLMBased_Multimodal_RS/Model_LLaMA/Ins

mkdir -p $OUTPUT_DIR
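# LoRA fine-tuning on 2 GPUs (torchrun) with fp16 and the DeepSpeed config below.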
torchrun --nproc_per_node=2 lora_finetune.py \
    --base_model $BASE_MODEL \
    --output_dir $OUTPUT_DIR \
    --inter_path $INTER \
    --feature_path $FEATURE \
    --per_device_batch_size 6 \
    --gradient_accumulation_steps 2 \
    --learning_rate 5e-5 \
    --epochs 4 \
    --weight_decay 0.01 \
    --save_and_eval_strategy epoch \
    --fp16 \
    --deepspeed ./config/ds_z2_fp16.json \
    --dataloader_num_workers 4 \
    --only_train_response \
    --tasks seqrec,item2index,index2item,fusionseqrec \
    --train_prompt_sample_num 1,1,1,1 \
    --train_data_sample_num 0,0,0,0 \
    --index_file $INDEX
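# ./config/ds_z2_fp16.json is expected to be a standard DeepSpeed ZeRO stage 2
# + fp16 config. A minimal sketch (an assumption, not the repo's actual file;
# "auto" fields rely on a Hugging Face Trainer-style DeepSpeed integration):
# {
#   "train_micro_batch_size_per_gpu": "auto",
#   "gradient_accumulation_steps": "auto",
#   "zero_optimization": { "stage": 2 },
#   "fp16": { "enabled": true }
# }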
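# Run the repo's convert step on the training output in the background;
# progress is written to convert/convert.log.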
cd convert
nohup ./convert.sh $OUTPUT_DIR >convert.log 2>&1 &
cd ..