"""Minimal Gradio chat UI over a local GPT4All (ggjt) model via llama-cpp."""

import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Fetch the quantized model weights once into the working directory.
hf_hub_download(
    repo_id="LLukas22/gpt4all-lora-quantized-ggjt",
    filename="ggjt-model.bin",
    local_dir=".",
)

# Load the model; this is expensive, so do it once at module level.
llm = Llama(model_path="./ggjt-model.bin")


def chat(prompt: str) -> str:
    """Generate a completion for *prompt* and return only the generated text.

    Llama.__call__ returns an OpenAI-style completion dict; the previous
    version returned that dict directly, so the Gradio text output showed
    the raw dict repr instead of the model's reply.
    """
    response = llm(prompt)
    return response["choices"][0]["text"]


if __name__ == "__main__":
    # Guard so importing this module does not start a web server.
    gr.Interface(fn=chat, inputs="text", outputs="text").launch()