```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Abonia/Llama-2-7b-chat-finetune")
model = AutoModelForCausalLM.from_pretrained("Abonia/Llama-2-7b-chat-finetune")
```

```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="Abonia/Llama-2-7b-chat-finetune")
```

```python
# Load the hosted model as a Gradio demo and launch it
import gradio as gr

gr.load("models/Abonia/Llama-2-7b-chat-finetune").launch()
```
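For reference, a minimal generation call with the pipeline might look like the sketch below. The `[INST] ... [/INST]` prompt string assumes the standard Llama 2 chat format, and the sampling parameters (`max_new_tokens`, `temperature`) are illustrative choices, not values prescribed by this model card.

```python
# Minimal usage sketch (assumes `pipe` from the snippet above; the
# [INST] ... [/INST] prompt format is the standard Llama 2 chat
# convention and is an assumption here, not stated in the card).
prompt = "[INST] What is a large language model? [/INST]"
outputs = pipe(prompt, max_new_tokens=128, do_sample=True, temperature=0.7)
print(outputs[0]["generated_text"])
```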