import spaces
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
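
# Keep the loaded model in module-level globals so repeated calls to respond()
# reuse the same llama_cpp.Llama instance instead of reloading the GGUF file.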
llm = None
llm_model = None
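
# Fetch both quantized GGUF checkpoints at startup; hf_hub_download skips the
# download when the file is already present in local_dir.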
hf_hub_download(
    repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
    filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
    local_dir="./models",
)
hf_hub_download(
    repo_id="bartowski/gemma-2-27b-it-GGUF",
    filename="gemma-2-27b-it-Q8_0.gguf",
    local_dir="./models",
)
css = """
.bubble-wrap {
padding-top: calc(var(--spacing-xl) * 3) !important;
}
.message-row {
justify-content: space-evenly !important;
width: 100% !important;
max-width: 100% !important;
margin: calc(var(--spacing-xl)) 0 !important;
padding: 0 calc(var(--spacing-xl) * 3) !important;
}
.flex-wrap.user {
border-bottom-right-radius: var(--radius-lg) !important;
}
.flex-wrap.bot {
border-bottom-left-radius: var(--radius-lg) !important;
}
.message.user {
padding: 10px;
}
.message.bot {
text-align: right;
width: 100%;
padding: 10px;
border-radius: 10px;
}
.message-bubble-border {
border-radius: 6px !important;
}
.message-buttons {
justify-content: flex-end !important;
}
.message-buttons-left {
align-self: end !important;
}
.message-buttons-bot, .message-buttons-user {
right: 10px !important;
left: auto !important;
bottom: 2px !important;
}
.dark.message-bubble-border {
border-color: #343140 !important;
}
.dark.user {
background: #1e1c26 !important;
}
.dark.assistant.dark, .dark.pending.dark {
background: #16141c !important;
}
"""
def get_messages_formatter_type(model_name):
    """Map a GGUF filename to the chat template of its model family."""
    if "Llama" in model_name:
        return MessagesFormatterType.LLAMA_3
    elif "gemma" in model_name:
        return MessagesFormatterType.GEMMA_2
    else:
        raise ValueError(f"Unsupported model: {model_name}")
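
# On Hugging Face ZeroGPU Spaces, this decorator requests a GPU for the
# duration of the call (here capped at 120 seconds).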
@spaces.GPU(duration=120)
def respond(
message,
history: list[tuple[str, str]],
model,
system_message,
max_tokens,
temperature,
top_p,
top_k,
repeat_penalty,
):
    """Stream a chat completion, (re)loading the selected model on demand."""
    global llm
    global llm_model
    chat_template = get_messages_formatter_type(model)
    # (Re)load only when the dropdown selection changed; reusing the cached
    # Llama instance avoids re-reading a multi-gigabyte GGUF on every turn.
    if llm is None or llm_model != model:
        llm = Llama(
            model_path=f"models/{model}",
            flash_attn=True,   # flash-attention kernels, when supported
            n_gpu_layers=81,   # enough to offload every layer of both models
            n_batch=1024,
            n_ctx=8192,        # context window in tokens
        )
        llm_model = model
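    # Wrap the raw Llama instance in the provider interface llama-cpp-agent expects.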
provider = LlamaCppPythonProvider(llm)
agent = LlamaCppAgent(
provider,
        system_prompt=system_message,
predefined_messages_formatter_type=chat_template,
debug_output=True
)
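    # Start from the provider's default sampling settings and override them
    # with the values chosen in the UI.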
settings = provider.get_provider_default_settings()
settings.temperature = temperature
settings.top_k = top_k
settings.top_p = top_p
settings.max_tokens = max_tokens
settings.repeat_penalty = repeat_penalty
settings.stream = True
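    # Replay prior turns from the Gradio history so the model sees the full conversation.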
messages = BasicChatHistory()
    for user_text, assistant_text in history:
        messages.add_message({
            'role': Roles.user,
            'content': user_text,
        })
        messages.add_message({
            'role': Roles.assistant,
            'content': assistant_text,
        })
stream = agent.get_chat_response(
message,
llm_sampling_settings=settings,
chat_history=messages,
returns_streaming_generator=True,
print_output=False
)
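    # Accumulate the streamed tokens and yield the growing string; Gradio
    # re-renders the in-progress assistant message on every yield.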
outputs = ""
for output in stream:
outputs += output
yield outputs
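
# Static HTML card shown inside the empty chatbot before the first message.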
PLACEHOLDER = """
<div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 6px; border-width: 1px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);">
<figure style="margin: 0;max-width: 200px;min-height: 300px;">
<img src="https://huggingface.co/spaces/pabloce/llama-cpp-agent/resolve/main/llama.jpg" alt="Logo" style="width: 100%; height: 100%; border-radius: 8px;">
</figure>
<div style="padding: .5rem 1.5rem;display: flex;flex-direction: column;justify-content: space-evenly;">
<h2 style="text-align: left; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">llama-cpp-agent</h2>
<p style="text-align: left; font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The llama-cpp-agent framework simplifies interactions with Large Language Models (LLMs), providing an interface for chatting, executing function calls, generating structured output, performing retrieval augmented generation, and processing text using agentic chains with tools.</p>
<div style="display: flex; justify-content: space-between; align-items: center;">
<div style="display: flex; flex-flow: column; justify-content: space-between;">
<span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(229, 70, 77, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #f88181; margin-bottom: 2.5px;">
Gemma-2 27b it
</span>
<span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(79, 70, 229, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #60a5fa; margin-top: 2.5px;">
Meta Llama 3 70B Instruct
</span>
</div>
<div style="display: flex; justify-content: flex-end; align-items: center;">
<a href="https://discord.gg/fgr5RycPFP" target="_blank" rel="noreferrer" style="padding: .5rem;">
<svg width="24" height="24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 5 30.67 23.25">
<title>Discord</title>
<path d="M26.0015 6.9529C24.0021 6.03845 21.8787 5.37198 19.6623 5C19.3833 5.48048 19.0733 6.13144 18.8563 6.64292C16.4989 6.30193 14.1585 6.30193 11.8336 6.64292C11.6166 6.13144 11.2911 5.48048 11.0276 5C8.79575 5.37198 6.67235 6.03845 4.6869 6.9529C0.672601 12.8736 -0.41235 18.6548 0.130124 24.3585C2.79599 26.2959 5.36889 27.4739 7.89682 28.2489C8.51679 27.4119 9.07477 26.5129 9.55525 25.5675C8.64079 25.2265 7.77283 24.808 6.93587 24.312C7.15286 24.1571 7.36986 23.9866 7.57135 23.8161C12.6241 26.1255 18.0969 26.1255 23.0876 23.8161C23.3046 23.9866 23.5061 24.1571 23.7231 24.312C22.8861 24.808 22.0182 25.2265 21.1037 25.5675C21.5842 26.5129 22.1422 27.4119 22.7621 28.2489C25.2885 27.4739 27.8769 26.2959 30.5288 24.3585C31.1952 17.7559 29.4733 12.0212 26.0015 6.9529ZM10.2527 20.8402C8.73376 20.8402 7.49382 19.4608 7.49382 17.7714C7.49382 16.082 8.70276 14.7025 10.2527 14.7025C11.7871 14.7025 13.0425 16.082 13.0115 17.7714C13.0115 19.4608 11.7871 20.8402 10.2527 20.8402ZM20.4373 20.8402C18.9183 20.8402 17.6768 19.4608 17.6768 17.7714C17.6768 16.082 18.8873 14.7025 20.4373 14.7025C21.9717 14.7025 23.2271 16.082 23.1961 17.7714C23.1961 19.4608 21.9872 20.8402 20.4373 20.8402Z"></path>
</svg>
</a>
<a href="https://github.com/Maximilian-Winter/llama-cpp-agent" target="_blank" rel="noreferrer" style="padding: .5rem;">
<svg width="24" height="24" fill="currentColor" viewBox="3 3 18 18">
<title>GitHub</title>
<path d="M12 3C7.0275 3 3 7.12937 3 12.2276C3 16.3109 5.57625 19.7597 9.15374 20.9824C9.60374 21.0631 9.77249 20.7863 9.77249 20.5441C9.77249 20.3249 9.76125 19.5982 9.76125 18.8254C7.5 19.2522 6.915 18.2602 6.735 17.7412C6.63375 17.4759 6.19499 16.6569 5.8125 16.4378C5.4975 16.2647 5.0475 15.838 5.80124 15.8264C6.51 15.8149 7.01625 16.4954 7.18499 16.7723C7.99499 18.1679 9.28875 17.7758 9.80625 17.5335C9.885 16.9337 10.1212 16.53 10.38 16.2993C8.3775 16.0687 6.285 15.2728 6.285 11.7432C6.285 10.7397 6.63375 9.9092 7.20749 9.26326C7.1175 9.03257 6.8025 8.08674 7.2975 6.81794C7.2975 6.81794 8.05125 6.57571 9.77249 7.76377C10.4925 7.55615 11.2575 7.45234 12.0225 7.45234C12.7875 7.45234 13.5525 7.55615 14.2725 7.76377C15.9937 6.56418 16.7475 6.81794 16.7475 6.81794C17.2424 8.08674 16.9275 9.03257 16.8375 9.26326C17.4113 9.9092 17.76 10.7281 17.76 11.7432C17.76 15.2843 15.6563 16.0687 13.6537 16.2993C13.98 16.5877 14.2613 17.1414 14.2613 18.0065C14.2613 19.2407 14.25 20.2326 14.25 20.5441C14.25 20.7863 14.4188 21.0746 14.8688 20.9824C16.6554 20.364 18.2079 19.1866 19.3078 17.6162C20.4077 16.0457 20.9995 14.1611 21 12.2276C21 7.12937 16.9725 3 12 3Z"></path>
</svg>
</a>
</div>
</div>
</div>
</div>
"""
demo = gr.ChatInterface(
respond,
additional_inputs=[
gr.Dropdown([
'Meta-Llama-3-70B-Instruct-Q3_K_M.gguf',
'gemma-2-27b-it-Q8_0.gguf'
],
value="gemma-2-27b-it-Q8_0.gguf",
label="Model"
),
gr.Textbox(value="You are a helpful assistant.", label="System message"),
gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
gr.Slider(
minimum=0.1,
maximum=1.0,
value=0.95,
step=0.05,
label="Top-p",
),
gr.Slider(
minimum=0,
maximum=100,
value=40,
step=1,
label="Top-k",
),
gr.Slider(
minimum=0.0,
maximum=2.0,
value=1.1,
step=0.1,
label="Repetition penalty",
),
],
    theme=gr.themes.Soft(
        primary_hue="violet",
        secondary_hue="violet",
        neutral_hue="gray",
        font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"],
    ).set(
body_background_fill_dark="#16141c",
block_background_fill_dark="#16141c",
block_border_width="1px",
block_title_background_fill_dark="#1e1c26",
input_background_fill_dark="#292733",
button_secondary_background_fill_dark="#24212b",
border_color_accent_dark="#343140",
border_color_primary_dark="#343140",
background_fill_secondary_dark="#16141c",
color_accent_soft_dark="transparent",
code_background_fill_dark="#292733",
),
css=css,
retry_btn="Retry",
undo_btn="Undo",
clear_btn="Clear",
submit_btn="Send",
    description="llama-cpp-agent: chat with a selectable LLM (Meta-Llama-3 70B or Gemma-2 27B)",
chatbot=gr.Chatbot(
scale=1,
placeholder=PLACEHOLDER,
likeable=False,
show_copy_button=True
)
)
if __name__ == "__main__":
demo.launch() |