# manticore-ggml / tabbed.py
import gradio as gr
import yaml
from huggingface_hub import hf_hub_download
from huggingface_hub.utils import LocalEntryNotFoundError
from llama_cpp import Llama
with open("./config.yml", "r") as f:
config = yaml.load(f, Loader=yaml.Loader)
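# For reference, a minimal config.yml might look like the sketch below. The
# "repo", "file", "llama_cpp", "chat", and "queue" keys are the ones this
# script actually reads; the values shown are illustrative guesses, not the
# Space's real configuration:
#
#   repo: TheBloke/wizard-mega-13B-GGML    # hypothetical model repo
#   file: wizard-mega-13B.ggmlv3.q5_1.bin  # hypothetical model file
#   llama_cpp:
#     n_ctx: 2048          # kwargs forwarded to Llama(...)
#   chat:
#     stop: ["</s>"]       # extra kwargs forwarded to each llm(...) call
#   queue:
#     concurrency_count: 1 # kwargs forwarded to demo.queue(...)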
# Download the model file from the Hub, retrying on transient connection errors
while True:
    try:
        fp = hf_hub_download(
            repo_id=config["repo"], filename=config["file"],
        )
        break
    except LocalEntryNotFoundError as e:
        if "Connection error" in str(e):
            print(str(e) + ", retrying...")
        else:
            raise
# Load the GGML model with llama-cpp-python, passing any kwargs from config.yml
llm = Llama(model_path=fp, **config["llama_cpp"])
def user(message, history):
    history = history or []
    # Append the user's message to the conversation history
    history.append([message, ""])
    return "", history
def chat(history, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
    history = history or []
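    # The join below flattens the history into a Vicuna-style prompt, e.g.
    # (illustrative, with an empty system message):
    #
    #   USER: Hello
    #   ASSISTANT: Hi there!
    #   USER: What is GGML?
    #   ASSISTANT: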
    messages = system_message + \
        "\n".join(["\n".join(["USER: " + item[0], "ASSISTANT: " + item[1]])
                   for item in history])
    # remove the trailing space after the final "ASSISTANT:" turn
    messages = messages[:-1]
    history[-1][1] = ""
    for output in llm(
        messages,
        echo=False,
        stream=True,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repeat_penalty=repeat_penalty,
        **config['chat']
    ):
        answer = output['choices'][0]['text']
        history[-1][1] += answer
        # stream the partial response back to the Chatbot and the state
        yield history, history
def clear_chat(chat_history_state, chat_message):
    chat_history_state = []
    chat_message = ''
    return chat_history_state, chat_message
start_message = ""
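# The instruct tab wraps the input in an Alpaca-style prompt; for an input of
# "Say hi", the model sees (illustrative):
#
#   ### Instruction:
#   Say hi
#
#   ### Response: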
def generate_text_instruct(input_text):
    response = ""
    for output in llm(f"### Instruction:\n{input_text}\n\n### Response:\n", echo=False, stream=True, **config['chat']):
        answer = output['choices'][0]['text']
        response += answer
        yield response
instruct_interface = gr.Interface(
    fn=generate_text_instruct,
    inputs=gr.Textbox(lines=10, label="Enter your input text"),
    outputs=gr.Textbox(label="Output text"),
)
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown(f"""
### brought to you by OpenAccess AI Collective
- Unquantized model available at https://huggingface.co/openaccess-ai-collective/wizard-mega-13b
- This Space serves the [{config["repo"]}](https://huggingface.co/{config["repo"]}) model file [{config["file"]}](https://huggingface.co/{config["repo"]}/blob/main/{config["file"]})
- This Space uses GGML with GPU support, so it can run larger models quickly on GPUs with less VRAM.
- It is running on a smaller, shared GPU, so it may take a few seconds to respond.
- [Duplicate the Space](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui?duplicate=true) to skip the queue, run it privately, or use your own GGML models.
- When using your own models, simply update the [config.yml](https://huggingface.co/spaces/openaccess-ai-collective/ggml-ui/blob/main/config.yml) to point at your repo and model file.
- Contribute at [https://github.com/OpenAccess-AI-Collective/ggml-webui](https://github.com/OpenAccess-AI-Collective/ggml-webui)
- Many thanks to [TheBloke](https://huggingface.co/TheBloke) for his many contributions to the community, publishing quantized versions of these models!
""")
with gr.Tab("Instruct"):
gr.Markdown("# GGML Spaces Instruct Demo")
instruct_interface.render()
with gr.Tab("Chatbot"):
gr.Markdown("# GGML Spaces Chatbot Demo")
chatbot = gr.Chatbot()
with gr.Row():
message = gr.Textbox(
label="What do you want to chat about?",
placeholder="Ask me anything.",
lines=1,
)
with gr.Row():
submit = gr.Button(value="Send message", variant="secondary").style(full_width=True)
clear = gr.Button(value="New topic", variant="secondary").style(full_width=False)
stop = gr.Button(value="Stop", variant="secondary").style(full_width=False)
with gr.Row():
with gr.Column():
max_tokens = gr.Slider(20, 500, label="Max Tokens", step=20, value=300)
temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=0.8)
top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95)
top_k = gr.Slider(0, 100, label="Top K", step=1, value=40)
repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1)
system_msg = gr.Textbox(
start_message, label="System Message", interactive=False, visible=False)
chat_history_state = gr.State()
        clear.click(clear_chat, inputs=[chat_history_state, message], outputs=[chat_history_state, message], queue=False)
        clear.click(lambda: None, None, chatbot, queue=False)
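        # Sending a message is a two-step chain: user() appends the message to
        # the history and clears the textbox, then chat() streams tokens into
        # the last history entry; .then() lets the textbox clear before
        # generation begins.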
        submit_click_event = submit.click(
            fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
        ).then(
            fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
        )
        message_submit_event = message.submit(
            fn=user, inputs=[message, chat_history_state], outputs=[message, chat_history_state], queue=True
        ).then(
            fn=chat, inputs=[chat_history_state, system_msg, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, chat_history_state], queue=True
        )
        stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False)
demo.queue(**config["queue"]).launch(debug=True, server_name="0.0.0.0", server_port=7860)