import os

import gradio as gr
from transformers import pipeline

# Hugging Face access token (required for the gated Llama-2 checkpoint).
# Read from the environment so the secret never lives in source control.
token = os.environ.get('HF_TOKEN')

# Use a pipeline as a high-level helper. Loading happens once at import time;
# every request then reuses the same in-memory model.
pipe = pipeline("text-generation", model="meta-llama/Llama-2-7b-chat-hf", token=token)


def llama_chat(name):
    """Generate a text completion for the user's prompt.

    Args:
        name: The prompt string entered in the Gradio textbox.

    Returns:
        The generated text as a plain string. The pipeline returns a list of
        dicts like ``[{"generated_text": ...}]``; returning that list directly
        would make the UI display its Python repr, so we unwrap the first
        candidate here. Falls back to an empty string if the pipeline yields
        no candidates.
    """
    resp = pipe(name)
    if not resp:
        return ""
    return resp[0]["generated_text"]


iface = gr.Interface(fn=llama_chat, inputs="text", outputs="text")

# Guard the server launch so importing this module (e.g. in tests) does not
# start the web app as a side effect.
if __name__ == "__main__":
    iface.launch()