qingxu99 commited on
Commit
e92ae1e
1 Parent(s): 986e646

Try Github Actions

Browse files
docs/GithubAction+ChatGLM+Moss ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Build on the NVIDIA CUDA runtime image so the GPU is usable in-container
# (the host's nvidia-smi must report a CUDA version >= 11.3).
FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
ARG useProxyNetwork=''

# OS packages: combine update+install in ONE layer (a standalone
# `apt-get update` layer goes stale and breaks later rebuilds), skip
# recommended packages, drop the duplicate `curl`, and remove the apt
# lists in the same layer so they are not baked into the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        curl \
        git \
        proxychains \
        python \
        python-dev \
        python3 \
        python3-dev \
    && rm -rf /var/lib/apt/lists/*

# Bootstrap pip for python3.8 so python3 becomes the working default.
RUN curl -sS https://bootstrap.pypa.io/get-pip.py | python3.8

# PyTorch wheel built against CUDA 11.3 to match the base image.
# --no-cache-dir keeps the (very large) wheel cache out of the layer.
RUN python3 -m pip install --no-cache-dir torch --extra-index-url https://download.pytorch.org/whl/cu113

# Fetch the application and the MOSS model repo.
WORKDIR /gpt
RUN git clone https://github.com/binary-husky/chatgpt_academic.git
WORKDIR /gpt/chatgpt_academic
RUN git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss

# Python dependencies for the app core and each model backend.
RUN python3 -m pip install --no-cache-dir -r requirements.txt \
    && python3 -m pip install --no-cache-dir -r request_llm/requirements_moss.txt \
    && python3 -m pip install --no-cache-dir -r request_llm/requirements_chatglm.txt \
    && python3 -m pip install --no-cache-dir -r request_llm/requirements_newbing.txt

# Pre-download the ChatGLM weights at build time (optional warm-up step;
# the generated script string must stay exactly as-is — it is executed).
RUN echo ' \n\
from transformers import AutoModel, AutoTokenizer \n\
chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) \n\
chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ' >> warm_up_chatglm.py
RUN python3 -u warm_up_chatglm.py

# Cache-bust so the `git pull` below always fetches the latest code.
# NOTE(review): a bare `ADD <url>` is normally an anti-pattern; it is
# deliberate here because the random payload defeats the build cache.
# `ADD --checksum` cannot apply (content is random by design); a
# `--build-arg CACHEBUST=$(date +%s)` would be a cleaner alternative.
ADD "https://www.random.org/cgi-bin/randbyte?nbytes=10&format=h" skipcache
RUN git pull

# Warm up the Tiktoken encoder caches via the project's helper.
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'

# Launch in exec form: python3 is PID 1 and receives SIGTERM from
# `docker stop`; -u keeps stdout/stderr unbuffered for live logs.
CMD ["python3", "-u", "main.py"]
docs/{Dockerfile+NoLocal → GithubAction+NoLocal} RENAMED
File without changes