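# chatgpt_academic with a local ChatGLM model (v3.1 branch).
# Build (tag and file name are only suggestions; adjust to your checkout):
#   docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
# Run with host networking and GPU access:
#   docker run --rm -it --net=host --gpus=all gpt-academic

# CUDA 11.3 runtime base image; the host NVIDIA driver must support
# CUDA >= 11.3 (check the version reported by `nvidia-smi`).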
FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04 |
|
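# Command prefix applied to network-dependent build steps; empty by default.
# Build with `--build-arg useProxyNetwork=proxychains` (and enable the
# optional proxychains setup below) to route those steps through a proxy.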
ARG useProxyNetwork='' |
|
RUN apt-get update |
|
RUN apt-get install -y curl proxychains
|
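# Git plus the Python 2/3 toolchains (python3 resolves to 3.8 on Ubuntu 20.04).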
RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing |
|
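# Optional proxychains configuration: a sketch, assuming a SOCKS5 proxy
# reachable from the build at 127.0.0.1:10880 (the same endpoint as the
# `proxies` value written to config_private.py below). Uncomment to enable:
# RUN sed -i '$ d' /etc/proxychains.conf
# RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf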
|
# Bootstrap pip for the system Python (3.8 on Ubuntu 20.04); the top-level
# get-pip.py no longer supports 3.8, so use the version-pinned script.
RUN curl -sS https://bootstrap.pypa.io/pip/3.8/get-pip.py | python3.8
|
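# Fetch the v3.1 branch of the project and install its Python dependencies.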
WORKDIR /gpt |
|
RUN $useProxyNetwork git clone https://github.com/binary-husky/chatgpt_academic.git -b v3.1 |
|
WORKDIR /gpt/chatgpt_academic |
|
RUN $useProxyNetwork python3 -m pip install -r requirements.txt |
|
RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt |
|
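# Install a CUDA 11.3 build of PyTorch to match the base image.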
RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113 |
|
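# Optional warm-up: write a small script that downloads the THUDM/chatglm-6b
# weights at build time, so the first chat request does not block on a large
# model download.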
RUN echo ' \n\
from transformers import AutoModel, AutoTokenizer \n\
chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) \n\
chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ' >> warm_up_chatglm.py
|
RUN python3 -u warm_up_chatglm.py |
|
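# Cache buster: ADDing a random remote payload invalidates the build cache from
# this point on, so the `git pull` below always fetches the latest code.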
ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache |
|
RUN $useProxyNetwork git pull |
|
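# Warm up the remaining modules (e.g. tiktoken encodings) so they are cached
# in the image rather than downloaded at runtime.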
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()' |
|
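# Write config_private.py, which overrides config.py at runtime:
#   API_KEY            - replace the placeholder with a real key before building
#   USE_PROXY/proxies  - runtime proxy used to reach the OpenAI API
#   LLM_MODEL          - model selected at startup (local ChatGLM here)
#   LOCAL_MODEL_DEVICE - "cpu" or "cuda" for local models such as ChatGLM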
RUN echo ' \n\
API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
USE_PROXY = True \n\
LLM_MODEL = "chatglm" \n\
LOCAL_MODEL_DEVICE = "cpu" \n\
proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py
|
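# Launch the web UI.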
CMD ["python3", "-u", "main.py"] |