Spaces:
Sleeping
Sleeping
修正多GPU选择的说明
Browse files- docs/Dockerfile+ChatGLM +4 -2
docs/Dockerfile+ChatGLM
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
# How to build | 如何构建: docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
|
2 |
-
# How to run |
|
3 |
-
# How to run |
|
4 |
|
5 |
# 从NVIDIA源,从而支持显卡运行(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
|
6 |
FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
|
@@ -14,6 +14,7 @@ RUN apt-get install -y git python python3 python-dev python3-dev --fix-missing
|
|
14 |
RUN $useProxyNetwork curl cip.cc
|
15 |
RUN sed -i '$ d' /etc/proxychains.conf
|
16 |
RUN sed -i '$ d' /etc/proxychains.conf
|
|
|
17 |
RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf
|
18 |
ARG useProxyNetwork=proxychains
|
19 |
# # comment out above if you do not need proxy network | 如果不需要翻墙 - 从此行向上删除
|
@@ -49,6 +50,7 @@ RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
|
|
49 |
# 可同时填写多个API-KEY,支持openai的key和api2d的key共存,用英文逗号分割,例如API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........"
|
50 |
# LLM_MODEL 是选择初始的模型
|
51 |
# LOCAL_MODEL_DEVICE 是选择chatglm等本地模型运行的设备,可选 cpu 和 cuda
|
|
|
52 |
RUN echo ' \n\
|
53 |
API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
|
54 |
USE_PROXY = True \n\
|
|
|
1 |
# How to build | 如何构建: docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
|
2 |
+
# How to run | (1) 我想直接一键运行(选择0号GPU): docker run --rm -it --net=host --gpus \"device=0\" gpt-academic
|
3 |
+
# How to run | (2) 我想运行之前进容器做一些调整(选择1号GPU): docker run --rm -it --net=host --gpus \"device=1\" gpt-academic bash
|
4 |
|
5 |
# 从NVIDIA源,从而支持显卡运行(检查宿主的nvidia-smi中的cuda版本必须>=11.3)
|
6 |
FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
|
|
|
14 |
RUN $useProxyNetwork curl cip.cc
|
15 |
RUN sed -i '$ d' /etc/proxychains.conf
|
16 |
RUN sed -i '$ d' /etc/proxychains.conf
|
17 |
+
# 在这里填写主机的代理协议(用于从github拉取代码)
|
18 |
RUN echo "socks5 127.0.0.1 10880" >> /etc/proxychains.conf
|
19 |
ARG useProxyNetwork=proxychains
|
20 |
# # comment out above if you do not need proxy network | 如果不需要翻墙 - 从此行向上删除
|
|
|
50 |
# 可同时填写多个API-KEY,支持openai的key和api2d的key共存,用英文逗号分割,例如API_KEY = "sk-openaikey1,fkxxxx-api2dkey2,........"
|
51 |
# LLM_MODEL 是选择初始的模型
|
52 |
# LOCAL_MODEL_DEVICE 是选择chatglm等本地模型运行的设备,可选 cpu 和 cuda
|
53 |
+
# [说明: 以下内容与`config.py`一一对应,请查阅config.py来完成以下配置的填写]
|
54 |
RUN echo ' \n\
|
55 |
API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,fkxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
|
56 |
USE_PROXY = True \n\
|