jkeisling committed
Commit 2c55da9 · 1 Parent(s): d00a8fd

Initial commit

Files changed (5)
  1. Dockerfile-ggml-cpp-wheel +45 -0
  2. Dockerfile-llama-cpp-wheel +50 -0
  3. app.py +148 -0
  4. config.yml +13 -0
  5. requirements.txt +6 -0
Dockerfile-ggml-cpp-wheel ADDED
@@ -0,0 +1,45 @@
+ FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu18.04
+
+ ARG CTRANSFORMERS_VERSION="0.2.5"
+ ARG CMAKE_VERSION=3.26
+ ARG CMAKE_VERSION_PATCH=3.26.3
+ ARG CMAKE_OS=linux
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV TZ=UTC
+
+ RUN apt-get update && \
+     apt-get install --no-install-recommends -y \
+         curl git vim build-essential software-properties-common python3 python3-pip python3-dev python3-venv \
+         libffi-dev libncurses5-dev zlib1g zlib1g-dev libreadline-dev libbz2-dev libsqlite3-dev libssl-dev \
+         libblas-dev liblapack-dev cmake && \
+     add-apt-repository ppa:ubuntu-toolchain-r/test && \
+     apt-get update && \
+     apt-get install --no-install-recommends -y gcc-10 g++-10 && \
+     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 && \
+     rm -rf /var/lib/apt/lists/* && \
+     pip3 install scikit-build
+ RUN curl -L https://cmake.org/files/v$CMAKE_VERSION/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh -o /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh && \
+     mkdir /opt/cmake && \
+     sh /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh --skip-license --prefix=/opt/cmake && \
+     ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+
+ RUN useradd -m -u 1000 appuser
+
+ WORKDIR /build
+ RUN chown appuser:appuser /build
+ USER appuser
+
+ ENV HOME /home/appuser
+ ENV PYENV_ROOT $HOME/.pyenv
+ ENV PATH $PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH
+
+ RUN git clone --depth 1 --branch v$CTRANSFORMERS_VERSION https://github.com/marella/ctransformers.git /build
+ RUN curl https://pyenv.run | bash
+
+ RUN pyenv install 3.8.9 && \
+     pyenv global 3.8.9 && \
+     pyenv rehash && \
+     pip install --no-cache-dir --upgrade pip==22.3.1 setuptools wheel && \
+     pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1" "scikit-build" && \
+     CT_CUBLAS=1 python3 setup.py bdist_wheel && \
+     ls -l /build/dist/
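This image exists only to produce a wheel, so the artifact has to be copied back out after the build. A minimal sketch of that extraction step, assuming a local Docker daemon; the image tag `ggml-wheel-builder` and the `./wheels` output directory are illustrative, not part of this repo:

```python
import subprocess

TAG = "ggml-wheel-builder"  # hypothetical tag for the image built above

# Build the image from this Dockerfile; the wheel ends up in /build/dist/.
subprocess.run(
    ["docker", "build", "-f", "Dockerfile-ggml-cpp-wheel", "-t", TAG, "."],
    check=True,
)

# Create a stopped container so its filesystem can be copied from,
# then pull the wheel(s) out and clean up.
cid = subprocess.run(
    ["docker", "create", TAG],
    check=True, capture_output=True, text=True,
).stdout.strip()
try:
    subprocess.run(["docker", "cp", f"{cid}:/build/dist/.", "./wheels"], check=True)
finally:
    subprocess.run(["docker", "rm", cid], check=True)
```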
Dockerfile-llama-cpp-wheel ADDED
@@ -0,0 +1,50 @@
+ FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu18.04
+
+ ARG LLAMA_CPP_VERSION="0.1.53"
+ ARG CMAKE_VERSION=3.26
+ ARG CMAKE_VERSION_PATCH=3.26.3
+ ARG CMAKE_OS=linux
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV TZ=UTC
+
+ RUN apt-get update && \
+     apt-get install --no-install-recommends -y \
+         curl git vim build-essential software-properties-common python3 python3-pip python3-dev python3-venv \
+         libffi-dev libncurses5-dev zlib1g zlib1g-dev libreadline-dev libbz2-dev libsqlite3-dev libssl-dev \
+         libblas-dev liblapack-dev cmake && \
+     add-apt-repository ppa:ubuntu-toolchain-r/test && \
+     apt-get update && \
+     apt-get install --no-install-recommends -y gcc-10 g++-10 && \
+     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 && \
+     rm -rf /var/lib/apt/lists/* && \
+     pip3 install scikit-build
+ RUN curl -L https://cmake.org/files/v$CMAKE_VERSION/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh -o /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh && \
+     mkdir /opt/cmake && \
+     sh /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh --skip-license --prefix=/opt/cmake && \
+     ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+
+ RUN useradd -m -u 1000 appuser
+
+ WORKDIR /build
+ RUN chown appuser:appuser /build
+ USER appuser
+
+ ENV HOME /home/appuser
+ ENV PYENV_ROOT $HOME/.pyenv
+ ENV PATH $PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH
+
+ RUN git clone --depth 1 --branch v$LLAMA_CPP_VERSION https://github.com/abetlen/llama-cpp-python.git /build
+ RUN git clone https://github.com/ggerganov/llama.cpp.git /build/vendor/llama.cpp
+ RUN curl https://pyenv.run | bash
+
+ RUN pyenv install 3.8.9 && \
+     pyenv global 3.8.9 && \
+     pyenv rehash && \
+     pip install --no-cache-dir --upgrade pip==22.3.1 setuptools wheel && \
+     pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1" "scikit-build" && \
+     CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 python3 setup.py bdist_wheel && \
+     mkdir /build/dists/ && \
+     cp dist/llama_cpp_python-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl dists/llama_cpp_python-gpu-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl && \
+     CMAKE_ARGS="-DLLAMA_CUBLAS=off" FORCE_CMAKE=1 python3 setup.py bdist_wheel && \
+     cp dist/llama_cpp_python-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl dists/llama_cpp_python-cpu-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl && \
+     ls -l /build/dists/
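Once the GPU wheel built here is installed, a short smoke test confirms it loads and offloads layers. A minimal sketch, where `model.bin` is a placeholder for any compatible GGML model file:

```python
from llama_cpp import Llama

# n_gpu_layers=40 offloads every layer of a 13B model to the GPU,
# matching the settings this repo uses in config.yml.
llm = Llama(model_path="model.bin", n_ctx=2048, n_gpu_layers=40)

out = llm("### Instruction:\nSay hello.\n\n### Response:\n", max_tokens=16)
print(out["choices"][0]["text"])
```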
app.py ADDED
@@ -0,0 +1,148 @@
+ import gradio as gr
+ import yaml
+ from huggingface_hub import hf_hub_download
+ from huggingface_hub.utils import LocalEntryNotFoundError
+ from llama_cpp import Llama
+
+ with open("./config.yml", "r") as f:
+     config = yaml.load(f, Loader=yaml.Loader)
+ while True:  # retry the download until it succeeds or fails for a non-network reason
+     try:
+         load_config = config.copy()
+         hub_config = load_config["hub"].copy()
+         repo_id = hub_config.pop("repo_id")
+         filename = hub_config.pop("filename")
+         fp = hf_hub_download(
+             repo_id=repo_id, filename=filename, **hub_config
+         )
+         break
+     except LocalEntryNotFoundError as e:
+         if "Connection error" in str(e):
+             print(str(e) + ", retrying...")
+         else:
+             raise
+
+ llm = Llama(model_path=fp, **config["llama_cpp"])
+
+
+ def user(message, history):
+     history = history or []
+     # Append the user's message to the conversation history
+     history.append([message, ""])
+     return "", history
+
+
+ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
+     history = history or []
+
+     messages = system_message + \
+         "\n".join(["\n".join(["Human: " + item[0], "Laura: " + item[1]])
+                    for item in history])
+
+     # remove the trailing space after "Laura:"; some models emit a ZWSP if it stays
+     messages = messages[:-1]
+
+     history[-1][1] = ""
+     for output in llm(
+         messages,
+         echo=False,
+         stream=True,
+         max_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         top_k=top_k,
+         repeat_penalty=repeat_penalty,
+         **config["chat"]
+     ):
+         answer = output["choices"][0]["text"]
+         history[-1][1] += answer
+         # stream the partial response back to the UI
+         yield history, history
+
+
+ def clear_chat(chat_history_state, chat_message):
+     chat_history_state = []
+     chat_message = ""
+     return chat_history_state, chat_message
+
+
+ start_message = """
+ This is a conversation between the helpful, cynical, and well-spoken AI chatbot Laura and a human. Laura's responses answer the human's request informatively, insightfully, and delightfully. Continue the chat from Laura's perspective.
+
+ ---
+
+ """
+
+
+ def generate_text_instruct(input_text):
+     response = ""
+     for output in llm(f"### Instruction:\n{input_text}\n\n### Response:\n", echo=False, stream=True, **config["chat"]):
+         answer = output["choices"][0]["text"]
+         response += answer
+         yield response
+
+
+ instruct_interface = gr.Interface(
+     fn=generate_text_instruct,
+     inputs=gr.inputs.Textbox(lines=10, label="Enter your input text"),
+     outputs=gr.outputs.Textbox(label="Output text"),
+ )
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(f"""
+             ### brought to you by OpenAccess AI Collective
+             - This is the [{config["hub"]["repo_id"]}](https://huggingface.co/{config["hub"]["repo_id"]}) model file [{config["hub"]["filename"]}](https://huggingface.co/{config["hub"]["repo_id"]}/blob/main/{config["hub"]["filename"]})
+             - This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs with limited VRAM.
+             - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
+             - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue, run in a private Space, or use your own GGML models.
+             - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml).
+             - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
+             - Many thanks to [TheBloke](https://huggingface.co/TheBloke) for all his contributions to the community, publishing quantized versions of the models out there!
+             """)
+     with gr.Tab("Instruct"):
+         gr.Markdown("# GGML Spaces Instruct Demo")
+         instruct_interface.render()
+
+     with gr.Tab("Chatbot"):
+         gr.Markdown("# GGML Spaces Chatbot Demo")
+         chatbot = gr.Chatbot()
+         with gr.Row():
+             message = gr.Textbox(
+                 label="What do you want to chat about?",
+                 placeholder="Ask me anything.",
+                 lines=1,
+             )
+         with gr.Row():
+             submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
+             clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
+             stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
+         with gr.Row():
+             with gr.Column():
+                 max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
+                 top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
+                 repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
+
+         system_msg = gr.Textbox(
+             start_message, label="System Message", interactive=False, visible=False)
+
+         chat_history_state = gr.State()
+         clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
+         clear.click(lambda: None, None, chatbot, queue=False)
+
+         submit_click_event = submit.click(
+             fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+         ).then(
+             fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+         )
+         message_submit_event = message.submit(
+             fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+         ).then(
+             fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+         )
+         stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False)
+
+ demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860)
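For reference, the `chat` handler above flattens the Gradio history into a plain transcript before calling the model. A minimal sketch of the prompt it builds for a one-turn history (the system text and messages are illustrative):

```python
start_message = "You are Laura.\n\n---\n\n"  # stands in for the real system prompt
history = [["What is GGML?", ""]]  # [user message, assistant reply so far]

prompt = start_message + "\n".join(
    "\n".join(["Human: " + user_msg, "Laura: " + bot_msg])
    for user_msg, bot_msg in history
)
prompt = prompt[:-1]  # drop the trailing space after "Laura:" (ZWSP workaround)

print(prompt)
# You are Laura.
#
# ---
#
# Human: What is GGML?
# Laura:
```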
config.yml ADDED
@@ -0,0 +1,13 @@
+ ---
+ hub:
+   repo_id: jkeisling/laura-openllama13b-600bt-ggml
+   filename: laura-openllama13b-600bt-q6_K.bin
+ llama_cpp:
+   n_ctx: 2048
+   n_gpu_layers: 40 # llama 13b has 40 layers
+ chat:
+   stop:
+     - "Human:"
+ queue:
+   max_size: 16
+   concurrency_count: 1 # leave this at 1; llama-cpp-python doesn't handle concurrent requests and will crash the entire app
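app.py reads each of these sections unconditionally at startup, so a quick structural check catches a bad edit before the Space boots. A minimal sketch; this validator is an add-on for illustration, not part of the app:

```python
import yaml

with open("config.yml") as f:
    cfg = yaml.safe_load(f)

# Keys app.py pops from the hub section before calling hf_hub_download.
assert {"repo_id", "filename"} <= cfg["hub"].keys(), "hub section incomplete"
# Sections passed through to Llama(), llm(), and demo.queue() respectively.
for section in ("llama_cpp", "chat", "queue"):
    assert section in cfg, f"missing section: {section}"
```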
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ --extra-index-url https://pypi.ngc.nvidia.com
+ nvidia-cuda-runtime
+ nvidia-cublas
+ llama-cpp-python @ https://github.com/OpenAccess-AI-Collective/ggml-webui/releases/download/v0.1.50-rc3/llama_cpp_python-gpu-0.1.50-cp38-cp38-linux_x86_64.whl
+ pyyaml
+ torch
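After installing these requirements, one way to confirm the NVIDIA runtime is actually visible inside the Space is through torch; a minimal sketch:

```python
import torch

# True when a CUDA device and runtime are usable on this host.
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("Device:", torch.cuda.get_device_name(0))
```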