#!/bin/bash
# run_inversion.sh — sbatch submission script for textual-inversion training
# (textual_inversion_scnnt_710). The shebang must be the first line of the
# file or sbatch/bash will not honor it.
#SBATCH --partition=orion --qos=normal
# #SBATCH --time=96:00:00 --> this is a comment; you can choose to not specify a nodelist, it will randomly assign to a GPU
#SBATCH --nodes=1
#SBATCH --cpus-per-task=8
#SBATCH --mem=12G
#SBATCH --account=orion
# only use the following on partition with GPUs
# NOTE(review): a GPU request line (e.g. #SBATCH --gres=gpu:1) appears to be
# missing after the comment above — confirm against the original template.
# only use the following if you want email notification
####SBATCH --mail-user=youremailaddress
####SBATCH --mail-type=ALL
# List some useful job information (optional). Expansions are kept inside the
# quotes so values containing spaces or glob characters print intact.
echo "SLURM_JOBID=$SLURM_JOBID"
echo "SLURM_JOB_NODELIST=$SLURM_JOB_NODELIST"
echo "SLURM_NNODES=$SLURM_NNODES"
echo "SLURMTMPDIR=$SLURMTMPDIR"
echo "working directory = $SLURM_SUBMIT_DIR"

# sample process (list hostnames of the nodes you've requested)
# NPROCS=$(srun --nodes="${SLURM_NNODES}" bash -c 'hostname' | wc -l)
# echo NPROCS=$NPROCS

# can try the following to list out which GPU you have access to
#srun /usr/local/cuda/samples/1_Utilities/deviceQuery/deviceQuery

# Pull in the user environment (conda initialization; presumably also where
# $cuda_version, $path and $conda are set — confirm, they are not defined in
# this script).
source ~/.bashrc

# Point the toolchain at the requested CUDA installation.
# NOTE(review): if $cuda_version is unset these resolve to "/usr/local/cuda-".
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/local/cuda-${cuda_version}/lib64"
export PATH="${PATH}:/usr/local/cuda-${cuda_version}/bin"
export CUDA_HOME="/usr/local/cuda-${cuda_version}"

# Fail fast if the working directory is missing instead of silently running
# the training launch from whatever directory we happen to be in.
cd "$path" || { echo "error: cannot cd to '$path'" >&2; exit 1; }
conda activate "$conda"
# Launch textual-inversion training through accelerate. Flags are collected in
# an array purely for readability; the resulting argv is identical to passing
# them inline with line continuations.
launch_args=(
  --pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5
  --train_data_dir=scnnt_710
  --learnable_property="style"
  --placeholder_token="<scnnt_710>"
  --initializer_token="room"
  --resolution=512
  --train_batch_size=1
  --gradient_accumulation_steps=4
  --max_train_steps=3000
  --learning_rate=5.0e-04
  --scale_lr
  --lr_scheduler="constant"
  --lr_warmup_steps=0
  --push_to_hub
  --output_dir="textual_inversion_scnnt_710"
)
accelerate launch textual_inversion.py "${launch_args[@]}"