resources:
  accelerators: A100:4
  cloud: gcp

num_nodes: 1

workdir: .
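
# Setup: create the conda environment, install FastChat, PyTorch, and the
# pinned transformers commit, then download the model weights from GCS.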
setup: |
  conda activate chatbot
  if [ $? -eq 0 ]; then
    echo 'conda env exists'
  else
    # Set up the environment
    conda create -n chatbot python=3.10 -y
  fi
  conda activate chatbot

  pip3 install -e .

  # Install PyTorch
  pip install torch==1.13.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116

  # Install Hugging Face transformers pinned to the commit with LLaMA support
  pip install git+https://github.com/huggingface/transformers.git@c612628045822f909020f7eb6784c79700813eda

  cd fastchat/eval
  pip install -r requirements.txt
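
  # Download the Vicuna weights from the GCS bucket once; the "ready" marker
  # file skips the re-download on later setup runs.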
  MODEL_NAME=vicuna-7b-20230322-fp16
  MODEL_PATH=~/${MODEL_NAME}
  if [ ! -f "$MODEL_PATH/ready" ]; then
    echo "export MODEL_PATH=${MODEL_PATH}" >> ~/.bashrc
    echo "export MODEL_NAME=${MODEL_NAME}" >> ~/.bashrc
    mkdir -p $MODEL_PATH
    gsutil -m cp gs://model-weights/${MODEL_NAME}/* $MODEL_PATH
    touch $MODEL_PATH/ready
    echo "model downloaded"
  fi
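
# Run command: generate model answers for the evaluation questions on all
# GPUs of the node (SKYPILOT_NUM_GPUS_PER_NODE is set by SkyPilot).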
run: |
  conda activate chatbot
  python -m fastchat.eval.get_model_answer --model-path $MODEL_PATH \
    --model-id $MODEL_NAME \
    --question-file fastchat/eval/table/question.jsonl \
    --answer-file answer.jsonl \
    --num-gpus $SKYPILOT_NUM_GPUS_PER_NODE