"""Gradio chat UI that streams responses from Google's Gemma-7B model."""
import threading

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-7b")


def generate(message, history):
    """Stream the model's reply to *message* as it is generated.

    gr.ChatInterface invokes its fn with (message, history); the original
    one-argument signature raised TypeError on every chat turn. *history*
    is accepted but unused — each turn is answered without prior context.

    Yields progressively longer partial response strings so the Gradio
    chat window renders token-by-token streaming.
    """
    inputs = tokenizer([message], return_tensors="pt")
    # TextIteratorStreamer exposes decoded text chunks as an iterator; the
    # plain TextStreamer only prints to stdout, so nothing reached the UI.
    # A fresh streamer per request avoids cross-request interleaving.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    # model.generate blocks until generation completes, so run it on a
    # worker thread and consume the streamer on this one.
    worker = threading.Thread(
        target=model.generate, kwargs={**inputs, "streamer": streamer}
    )
    worker.start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        # ChatInterface expects the full message-so-far, not deltas.
        yield partial
    worker.join()


app = gr.ChatInterface(generate)
app.launch(debug=True)