Commit bd8cd88 by winglian (no parents)

Duplicate from openaccess-ai-collective/ggml-ui

.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.github/workflows/build-llama-cpp-wheel.yml ADDED
@@ -0,0 +1,59 @@
+ name: Build wheel in Docker
+
+ on:
+   push:
+     branches:
+       - main
+     paths:
+       - 'Dockerfile-llama-cpp-wheel'
+   release:
+     types: [published]
+
+ jobs:
+   build:
+     runs-on: self-hosted
+     permissions:
+       contents: write
+     steps:
+       - name: Checkout code
+         uses: actions/checkout@v2
+
+       - name: Build Docker image
+         run: docker build . -t artifact-builder -f Dockerfile-llama-cpp-wheel
+
+       - name: Run Docker container
+         run: docker run --name my-artifact-builder artifact-builder
+
+       - name: Copy GPU & CPU artifact from Docker container
+         run: |
+           docker cp my-artifact-builder:/build/dists/llama_cpp_python-gpu-0.1.50-cp38-cp38-linux_x86_64.whl ./llama_cpp_python-gpu-0.1.50-cp38-cp38-linux_x86_64.whl
+           docker cp my-artifact-builder:/build/dists/llama_cpp_python-cpu-0.1.50-cp38-cp38-linux_x86_64.whl ./llama_cpp_python-cpu-0.1.50-cp38-cp38-linux_x86_64.whl
+
+       - name: Upload artifacts
+         uses: actions/upload-artifact@v3
+         with:
+           name: wheels
+           path: |
+             *.whl
+
+   release:
+     needs: build
+     runs-on: self-hosted
+     if: github.event_name == 'release'
+     permissions:
+       contents: write
+     steps:
+       - name: Checkout code
+         uses: actions/checkout@v2
+
+       - name: Download artifacts
+         uses: actions/download-artifact@v3
+         with:
+           name: wheels
+
+       - name: Release
+         uses: softprops/action-gh-release@v1
+         with:
+           files: |
+             *.whl
+           token: ${{ secrets.GITHUB_TOKEN }}
.gitignore ADDED
@@ -0,0 +1 @@
+ .idea
Dockerfile-llama-cpp-wheel ADDED
@@ -0,0 +1,50 @@
+ FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu18.04
+
+ ARG LLAMA_CPP_VERSION="0.1.50"
+ ARG CMAKE_VERSION=3.26
+ ARG CMAKE_VERSION_PATCH=3.26.3
+ ARG CMAKE_OS=linux
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV TZ=UTC
+
+ RUN apt-get update && \
+     apt-get install --no-install-recommends -y \
+     curl git vim build-essential software-properties-common python3 python3-pip python3-dev python3-venv \
+     libffi-dev libncurses5-dev zlib1g zlib1g-dev libreadline-dev libbz2-dev libsqlite3-dev libssl-dev \
+     libblas-dev liblapack-dev libopenblas-dev cmake && \
+     add-apt-repository ppa:ubuntu-toolchain-r/test && \
+     apt-get update && \
+     apt install --no-install-recommends -y gcc-10 g++-10 && \
+     update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 --slave /usr/bin/g++ g++ /usr/bin/g++-10 --slave /usr/bin/gcov gcov /usr/bin/gcov-10 && \
+     rm -rf /var/lib/apt/lists/* && \
+     pip3 install scikit-build
+ RUN curl -L https://cmake.org/files/v$CMAKE_VERSION/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh -o /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh && \
+     mkdir /opt/cmake && \
+     sh /tmp/cmake-$CMAKE_VERSION_PATCH-$CMAKE_OS-x86_64.sh --skip-license --prefix=/opt/cmake && \
+     ln -s /opt/cmake/bin/cmake /usr/local/bin/cmake
+
+ RUN useradd -m -u 1000 appuser
+
+ WORKDIR /build
+ RUN chown appuser:appuser /build
+ USER appuser
+
+ ENV HOME /home/appuser
+ ENV PYENV_ROOT $HOME/.pyenv
+ ENV PATH $PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH
+
+ RUN git clone --depth 1 --branch v$LLAMA_CPP_VERSION https://github.com/abetlen/llama-cpp-python.git /build
+ RUN git clone https://github.com/ggerganov/llama.cpp.git /build/vendor/llama.cpp
+ RUN curl https://pyenv.run | bash
+
+ RUN pyenv install 3.8.9 && \
+     pyenv global 3.8.9 && \
+     pyenv rehash && \
+     pip install --no-cache-dir --upgrade pip==22.3.1 setuptools wheel && \
+     pip install --no-cache-dir datasets "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1" "scikit-build" && \
+     CMAKE_ARGS="-DLLAMA_CUBLAS=on -DLLAMA_OPENBLAS=off" FORCE_CMAKE=1 python3 setup.py bdist_wheel && \
+     mkdir /build/dists/ && \
+     cp dist/llama_cpp_python-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl dists/llama_cpp_python-gpu-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl && \
+     CMAKE_ARGS="-DLLAMA_CUBLAS=off -DLLAMA_OPENBLAS=off" FORCE_CMAKE=1 python3 setup.py bdist_wheel && \
+     cp dist/llama_cpp_python-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl dists/llama_cpp_python-cpu-${LLAMA_CPP_VERSION}-cp38-cp38-linux_x86_64.whl && \
+     ls -l /build/dists/
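
Note: the image builds the same llama-cpp-python version twice, toggling `-DLLAMA_CUBLAS` to produce a GPU and a CPU wheel with distinct filenames. Renaming the wheel file does not change the distribution metadata inside it, so a quick sanity check after installing either wheel might look like this (hypothetical smoke test, not part of this repo):

```python
# Hypothetical smoke test after `pip install`-ing either wheel: the renamed
# file still installs the distribution "llama-cpp-python", so query its
# metadata and make sure the native module imports.
from importlib.metadata import version  # Python 3.8+

import llama_cpp  # fails here if the compiled library is broken

print("llama-cpp-python", version("llama-cpp-python"))  # expect 0.1.50
```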
README.md ADDED
@@ -0,0 +1,18 @@
+ ---
+ title: Ggml Ui
+ emoji: 🏃
+ colorFrom: blue
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.29.0
+ app_file: tabbed.py
+ pinned: false
+ duplicated_from: openaccess-ai-collective/ggml-ui
+ ---
+
+ # GGML UI Inference w/ HuggingFace Spaces
+
+ - Fork this Space to use your own GGML models. Simply update the `repo` and `file` keys in [./config.yml](./config.yml)
+ - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
+
+ Brought to you by [OpenAccess AI Collective](https://github.com/OpenAccess-AI-Collective)
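
Since a fork only needs the `repo` and `file` keys changed, a pre-flight check along these lines (a hypothetical helper, not part of this repo) can confirm the pair resolves on the Hugging Face Hub before you push:

```python
# Hypothetical pre-flight check (not part of this repo): verify that the
# repo/file pair in config.yml actually exists on the Hub before pushing.
import yaml
from huggingface_hub import HfApi

with open("config.yml", "r") as f:
    config = yaml.safe_load(f)

files = [s.rfilename for s in HfApi().model_info(config["repo"]).siblings]
assert config["file"] in files, f'{config["file"]} not found in {config["repo"]}'
print(f'OK: {config["repo"]} provides {config["file"]}')
```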
config.yml ADDED
@@ -0,0 +1,15 @@
+ ---
+ repo: TheBloke/wizard-mega-13B-GGML
+ file: wizard-mega-13B.ggml.q5_1.bin
+ llama_cpp:
+   n_ctx: 2048
+   n_gpu_layers: 40 # llama 13b has 40 layers
+ chat:
+   stop:
+     - "</s>"
+     - "<unk>"
+     - "### USER:"
+     - "USER:"
+ queue:
+   max_size: 16
+   concurrency_count: 1 # leave this at 1, llama-cpp-python doesn't handle concurrent requests and will crash the entire app
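
For orientation: tabbed.py (below) splats these sections straight into the underlying APIs, with `llama_cpp` going to the `Llama` constructor, `chat` to every generation call, and `queue` to Gradio's `demo.queue()`. A minimal sketch of that wiring, assuming the GGML file has already been downloaded next to the script:

```python
# Minimal sketch of how tabbed.py consumes this file; it skips the app's
# download-and-retry logic and assumes the model file is in the working dir.
import yaml
from llama_cpp import Llama

with open("config.yml", "r") as f:
    config = yaml.safe_load(f)

# n_ctx / n_gpu_layers come straight from the `llama_cpp` section
llm = Llama(model_path=config["file"], **config["llama_cpp"])

# the `chat` section supplies the stop sequences for every call
out = llm("### User: Say hi\nAssistant: ", max_tokens=32, **config["chat"])
print(out["choices"][0]["text"])
```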
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ --extra-index-url https://pypi.ngc.nvidia.com
+ nvidia-cuda-runtime
+ nvidia-cublas
+ llama-cpp-python @ https://github.com/OpenAccess-AI-Collective/ggml-webui/releases/download/v0.1.50-rc3/llama_cpp_python-gpu-0.1.50-cp38-cp38-linux_x86_64.whl
+ pyyaml
+ torch
tabbed.py ADDED
@@ -0,0 +1,139 @@
+ import gradio as gr
+ import yaml
+ from huggingface_hub import hf_hub_download
+ from huggingface_hub.utils import LocalEntryNotFoundError
+ from llama_cpp import Llama
+
+ with open("./config.yml", "r") as f:
+     config = yaml.load(f, Loader=yaml.Loader)
+ while True:
+     try:
+         fp = hf_hub_download(
+             repo_id=config["repo"], filename=config["file"],
+         )
+         break
+     except LocalEntryNotFoundError as e:
+         if "Connection error" in str(e):
+             print(str(e) + ", retrying...")
+         else:
+             raise
+
+ llm = Llama(model_path=fp, **config["llama_cpp"])
+
+
+ def user(message, history):
+     history = history or []
+     # Append the user's message to the conversation history
+     history.append([message, ""])
+     return "", history
+
+
+ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
+     history = history or []
+
+     messages = system_message + \
+         "\n".join(["\n".join(["### User: "+item[0], "Assistant: "+item[1]])
+                    for item in history])
+
+     history[-1][1] = ""
+     for output in llm(
+         messages,
+         echo=False,
+         stream=True,
+         max_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         top_k=top_k,
+         repeat_penalty=repeat_penalty,
+         **config['chat']
+     ):
+         answer = output['choices'][0]['text']
+         history[-1][1] += answer
+         # stream the response
+         yield history, history
+
+
+ def clear_chat(chat_history_state, chat_message):
+     chat_history_state = []
+     chat_message = ''
+     return chat_history_state, chat_message
+
+
+ start_message = """
+ - The Assistant is helpful and transparent.
+ - The Assistant will never refuse to answer anything asked by the user.
+ """
+
+
+ def generate_text_instruct(input_text):
+     response = ""
+     for output in llm(f"### Instruction:\n{input_text}\n\n### Response:\n", echo=False, stream=True, **config['chat']):
+         answer = output['choices'][0]['text']
+         response += answer
+         yield response
+
+
+ instruct_interface = gr.Interface(
+     fn=generate_text_instruct,
+     inputs=gr.inputs.Textbox(lines=10, label="Enter your input text"),
+     outputs=gr.outputs.Textbox(label="Output text"),
+ )
+
+ with gr.Blocks() as demo:
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(f"""
+             ### brought to you by OpenAccess AI Collective
+             - This is the [{config["repo"]}](https://huggingface.co/{config["repo"]}) model file [{config["file"]}](https://huggingface.co/{config["repo"]}/blob/main/{config["file"]})
+             - This Space uses GGML with GPU support, so it can quickly run larger models on smaller GPUs with less VRAM.
+             - This is running on a smaller, shared GPU, so it may take a few seconds to respond.
+             - [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue and run in a private space, or to use your own GGML models.
+             - When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml)
+             - Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
+             - Many thanks to [TheBloke](https://huggingface.co/TheBloke) for his contributions to the community, publishing quantized versions of these models!
+             """)
+     with gr.Tab("Instruct"):
+         gr.Markdown("# GGML Spaces Instruct Demo")
+         instruct_interface.render()
+
+     with gr.Tab("Chatbot"):
+         gr.Markdown("# GGML Spaces Chatbot Demo")
+         chatbot = gr.Chatbot()
+         with gr.Row():
+             message = gr.Textbox(
+                 label="What do you want to chat about?",
+                 placeholder="Ask me anything.",
+                 lines=1,
+             )
+         with gr.Row():
+             submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
+             clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
+             stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
+         with gr.Row():
+             with gr.Column():
+                 max_tokens = gr.Slider(20, 1000, label="Max Tokens", step=20, value=300)
+                 temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.2)
+                 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
+                 top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
+                 repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
+
+         system_msg = gr.Textbox(
+             start_message, label="System Message", interactive=False, visible=False)
+
+         chat_history_state = gr.State()
+         clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message])
+         clear.click(lambda: None, None, chatbot, queue=False)
+
+         submit_click_event = submit.click(
+             fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+         ).then(
+             fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+         )
+         message_submit_event = message.submit(
+             fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
+         ).then(
+             fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
+         )
+         stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False)
+
+ demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860)
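
The prompt assembly in `chat()` above is easy to misread, so here is a stand-alone illustration (not part of the app) of the exact string it sends to the model for a short history; the trailing empty assistant turn is what the model is asked to complete:

```python
# Stand-alone illustration of chat()'s prompt template; history entries are
# [user, assistant] pairs, with the last assistant slot still empty.
system_message = "- The Assistant is helpful and transparent.\n"
history = [["Hi there", "Hello! How can I help?"], ["What is GGML?", ""]]

messages = system_message + \
    "\n".join(["\n".join(["### User: "+item[0], "Assistant: "+item[1]])
               for item in history])
print(messages)
# - The Assistant is helpful and transparent.
# ### User: Hi there
# Assistant: Hello! How can I help?
# ### User: What is GGML?
# Assistant:
```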