from ctransformers import AutoModelForCausalLM
import gradio as gr

# Registry of quantized GGUF chat models served by this Space, with the
# prompt prefix/suffix that each model's chat template expects.
llms = {
    "tinyllama": {"name": "TheBloke/TinyLlama-1.1B-1T-OpenOrca-GGUF",
                  "file": "tinyllama-1.1b-1t-openorca.Q4_K_M.gguf",
                  "suffix": "<|im_end|><|im_start|>assistant",
                  "prefix": "<|im_start|>system You are a helpful assistant<|im_end|><|im_start|>user "},
    "orca2": {"name": "TheBloke/Orca-2-7B-GGUF",
              "file": "orca-2-7b.Q4_K_M.gguf",
              "suffix": "<|im_end|><|im_start|>assistant",
              "prefix": "<|im_start|>system You are a helpful assistant<|im_end|><|im_start|>user "},
    "zephyr": {"name": "TheBloke/zephyr-7B-beta-GGUF",
               "file": "zephyr-7b-beta.Q4_K_M.gguf",
               "suffix": "<|assistant|>",
               "prefix": "<|system|>You are a helpful assistant<|user|> "},
    "mistral": {"name": "TheBloke/Mistral-7B-Instruct-v0.1-GGUF",
                "file": "mistral-7b-instruct-v0.1.Q4_K_M.gguf",
                "suffix": "[/INST]",
                "prefix": "[INST] "},
    "llama2": {"name": "TheBloke/Llama-2-7B-Chat-GGUF",
               "file": "llama-2-7b-chat.Q4_K_M.gguf",
               "suffix": "[/INST]",
               "prefix": "[INST] <<SYS>> You are a helpful assistant <</SYS>> "},
    "solar": {"name": "TheBloke/SOLAR-10.7B-Instruct-v1.0-GGUF",
              "file": "solar-10.7b-instruct-v1.0.Q4_K_M.gguf",
              "suffix": "\n### Assistant:\n",
              "prefix": "### User:\n"},
}

# Download every model once at startup so the first chat request does not
# block on a multi-gigabyte download.
for k in llms:
    AutoModelForCausalLM.from_pretrained(llms[k]["name"], model_file=llms[k]["file"])


def stream(prompt, history, llm_name):
    # gr.ChatInterface calls fn(message, history, *additional_inputs), so the
    # dropdown selection arrives as the third argument.
    prefix = llms[llm_name]["prefix"]
    suffix = llms[llm_name]["suffix"]
    llm = AutoModelForCausalLM.from_pretrained(llms[llm_name]["name"],
                                               model_file=llms[llm_name]["file"])
    full_prompt = f"{prefix}{prompt}{suffix}"
    # Returns the full completion; ctransformers can also stream tokens with
    # llm(full_prompt, stream=True) if incremental output is wanted.
    return llm(full_prompt)


css = """
h1 {
  text-align: center;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
.contain {
  max-width: 900px;
  margin: auto;
  padding-top: 1.5rem;
}
"""

# Placeholder footer text; the original greeting string is not shown here.
greety = "Powered by ctransformers and Gradio."

select_llm = gr.Dropdown(choices=list(llms.keys()), value=list(llms.keys())[0], label="Model")
chat_interface = gr.ChatInterface(
    fn=stream,
    additional_inputs=[select_llm],
    stop_btn=None,
    examples=[
        ["explain Large language model"],
        ["what is quantum computing"],
    ],
)

with gr.Blocks(css=css) as demo:
    gr.HTML("<h1>Gathnex Free LLM Deployment Space</h1>")
    gr.HTML("<h3>Gathnex AI💬</h3>")
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
    chat_interface.render()
    gr.Markdown(greety)

if __name__ == "__main__":
    demo.queue(max_size=10).launch()