Launch ChatGPT+ChatGLM
Dockerfile+ChatGLM  CHANGED  (+11 -8)
@@ -1,6 +1,6 @@
 # How to build: docker build -t gpt-academic --network=host -f Dockerfile+ChatGLM .
-# How to run (1) run directly: docker run --rm -it --net=host gpt-academic
-# How to run (2) enter the container to make adjustments before running: docker run --rm -it --net=host gpt-academic bash
+# How to run (1) run directly: docker run --rm -it --net=host --gpus=all gpt-academic
+# How to run (2) enter the container to make adjustments before running: docker run --rm -it --net=host --gpus=all gpt-academic bash
 
 # Build from the NVIDIA CUDA image to enable GPU computation (the CUDA version reported by the host's nvidia-smi must be >= 11.3)
 FROM nvidia/cuda:11.3.1-runtime-ubuntu20.04
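
Note on the new run commands: --gpus=all is what actually exposes the host GPU to the container (the host needs the NVIDIA Container Toolkit for the flag to work). Below is a minimal sanity check you can run inside the container; it is illustrative and not part of the commit, and it assumes the cu113 torch wheel installed in the second hunk:

    # check_gpu.py (hypothetical helper, not in the repo): confirm that
    # --gpus=all actually exposed a GPU to PyTorch inside the container.
    import torch

    if torch.cuda.is_available():
        print("GPU:", torch.cuda.get_device_name(0))
        print("torch built for CUDA", torch.version.cuda)  # "11.3" for the cu113 wheel
    else:
        print("No GPU visible - was the container started with --gpus=all?")

Without --gpus=all the container only sees the CPU, so the LOCAL_MODEL_DEVICE = "cuda" setting added further down would have nothing to run on.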
@@ -30,18 +30,21 @@ RUN $useProxyNetwork python3 -m pip install -r requirements.txt
 RUN $useProxyNetwork python3 -m pip install -r request_llm/requirements_chatglm.txt
 RUN $useProxyNetwork python3 -m pip install torch --extra-index-url https://download.pytorch.org/whl/cu113
 
-# Configure the proxy and API-KEY for chatgpt-academic (optional step)
-RUN echo ' \n\
-API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
-USE_PROXY = True \n\
-proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py
-
 # Warm up the ChatGLM parameters (optional step)
 RUN echo ' \n\
 from transformers import AutoModel, AutoTokenizer \n\
 chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) \n\
 chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float() ' >> warm_up_chatglm.py
 RUN python3 -u warm_up_chatglm.py
+RUN $useProxyNetwork git pull
+
+# Configure the proxy and API-KEY for chatgpt-academic (optional step)
+RUN echo ' \n\
+API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \n\
+USE_PROXY = True \n\
+LLM_MODEL = "chatglm" \n\
+LOCAL_MODEL_DEVICE = "cuda" \n\
+proxies = { "http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880", } ' >> config_private.py
 
 # Launch
 CMD ["python3", "-u", "main.py"]
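
For readability, here is the Python that the warm-up RUN echo lines write into warm_up_chatglm.py, with comments added. Executing it during the build stores the THUDM/chatglm-6b weights in the image's Hugging Face cache, so the first request at runtime is not blocked by a multi-gigabyte download:

    # warm_up_chatglm.py, as generated by the RUN echo step in the diff.
    from transformers import AutoModel, AutoTokenizer

    # trust_remote_code=True is required because ChatGLM ships its own
    # custom modeling code alongside the weights.
    chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
    # .float() loads the weights as plain fp32 on the CPU; no GPU is
    # attached while docker build runs, only at docker run time.
    chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()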
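
Similarly, once the \n escapes are expanded, the config_private.py assembled by the final RUN echo block contains the lines below (in gpt-academic, values in config_private.py override the defaults in config.py; the API key is the placeholder from the diff, not a working key):

    # config_private.py, as generated by the RUN echo step in the diff.
    API_KEY = "sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"  # placeholder, substitute a real key
    USE_PROXY = True
    LLM_MODEL = "chatglm"            # make the local ChatGLM model the default backend
    LOCAL_MODEL_DEVICE = "cuda"      # run ChatGLM on the GPU exposed by --gpus=all
    proxies = {"http": "socks5h://localhost:10880", "https": "socks5h://localhost:10880"}

LLM_MODEL and LOCAL_MODEL_DEVICE are the two keys this commit adds over the old config block: they switch the default backend from the OpenAI API to the locally warmed-up ChatGLM model on the GPU.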