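# SkyPilot task: generate FastChat model answers for evaluation on GCP
# using a single node with 4x A100 GPUs.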
resources:
  accelerators: A100:4
  cloud: gcp

num_nodes: 1

workdir: .

setup: |
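  # Reuse the `chatbot` conda env if it already exists; otherwise create it.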
  conda activate chatbot
  if [ $? -eq 0 ]; then
    echo 'conda env exists'
  else
    # Setup the environment
    conda create -n chatbot python=3.10 -y
  fi
  conda activate chatbot

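  # Install FastChat (the synced workdir) in editable mode.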
  pip3 install -e .

  # Install pytorch
  pip install torch==1.13.1+cu116 --extra-index-url https://download.pytorch.org/whl/cu116

  # Install huggingface with the LLaMA commit
  pip install git+https://github.com/huggingface/transformers.git@c612628045822f909020f7eb6784c79700813eda

  cd fastchat/eval
  pip install -r requirements.txt

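  # Download the Vicuna weights from GCS once; the `ready` marker file skips re-downloads.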
  MODEL_NAME=vicuna-7b-20230322-fp16
  MODEL_PATH=~/${MODEL_NAME}

  if [ ! -f "$MODEL_PATH/ready" ]; then
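    # Record the model location in ~/.bashrc so later shells (e.g. the run section) can read $MODEL_PATH and $MODEL_NAME.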
    echo "export MODEL_PATH=${MODEL_PATH}" >> ~/.bashrc
    echo "export MODEL_NAME=${MODEL_NAME}" >> ~/.bashrc
    mkdir -p $MODEL_PATH
    gsutil -m cp gs://model-weights/${MODEL_NAME}/* $MODEL_PATH
    touch $MODEL_PATH/ready
    echo "model downloaded"
  fi

run: |
  conda activate chatbot
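  # Generate answers for the evaluation question set, using all GPUs on the node.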
  python -m fastchat.eval.get_model_answer --model-path $MODEL_PATH \
    --model-id $MODEL_NAME \
    --question-file fastchat/eval/table/question.jsonl \
    --answer-file answer.jsonl \
    --num-gpus $SKYPILOT_NUM_GPUS_PER_NODE