# Gradio chat Space for Nous Hermes 2 SOLAR, served with llama-cpp-python.
import gradio as gr
from llama_cpp import Llama

# Load the quantized GGUF checkpoint with an 8K context window, four CPU
# threads, and ChatML prompt formatting.
llm = Llama(model_path="model.gguf", n_ctx=8192, n_threads=4, chat_format="chatml")

def generate(message, history, temperature=0.75, max_tokens=768):
    system_prompt = ("You are a helpful and agreeable chatbot named Solar. "
                     "You are capable of replying with lightning-fast speed.")
    # Rebuild the full conversation: system prompt, prior turns, new message.
    formatted_prompt = [{"role": "system", "content": system_prompt}]
    for user_prompt, bot_response in history:
        formatted_prompt.append({"role": "user", "content": user_prompt})
        formatted_prompt.append({"role": "assistant", "content": bot_response})
    formatted_prompt.append({"role": "user", "content": message})
    # Stream tokens and yield the accumulated reply so the chat UI updates
    # incrementally instead of waiting for the full completion.
    stream_response = llm.create_chat_completion(messages=formatted_prompt,
                                                 temperature=temperature,
                                                 max_tokens=max_tokens, stream=True)
    response = ""
    for chunk in stream_response:
        delta = chunk["choices"][0]["delta"]
        if "content" in delta:
            response += delta["content"]
            yield response
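# For reference, a sketch of the streamed chunks llama-cpp-python yields; it
# mirrors the OpenAI chat-completion chunk schema, where the first chunk
# carries only the assistant role and later chunks carry "content" pieces:
#
#   {"choices": [{"delta": {"role": "assistant"}}]}
#   {"choices": [{"delta": {"content": "Hel"}}]}
#   {"choices": [{"delta": {"content": "lo"}}]}
#
# This is why the loop above appends only when "content" is present.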
# Chatbot widget with custom avatars, copy button, and likes (Gradio 4.x args).
mychatbot = gr.Chatbot(
    avatar_images=["user.png", "bots.png"],
    bubble_full_width=False, show_label=False,
    show_copy_button=True, likeable=True,
)
iface = gr.ChatInterface(fn=generate, chatbot=mychatbot, retry_btn="Retry", undo_btn="Undo")

# Wrap the ChatInterface in Blocks so a title banner sits above it.
with gr.Blocks() as demo:
    gr.HTML("<center><h1>Tomoniai's Chat with Nous Hermes 2 Solar</h1></center>")
    iface.render()

demo.queue().launch(show_api=False, server_name="0.0.0.0")
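# The script assumes model.gguf is already present next to it. If the file
# isn't bundled with the Space, one possible way to fetch a Nous Hermes 2
# SOLAR quantization at startup is huggingface_hub's hf_hub_download; the
# repo_id and filename below are illustrative guesses, not taken from the
# original Space.
#
# from huggingface_hub import hf_hub_download
#
# model_path = hf_hub_download(
#     repo_id="TheBloke/Nous-Hermes-2-SOLAR-10.7B-GGUF",  # assumed repo
#     filename="nous-hermes-2-solar-10.7b.Q4_K_M.gguf",   # assumed file
# )
# llm = Llama(model_path=model_path, n_ctx=8192, n_threads=4, chat_format="chatml")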