"""Serve google/gemma-2-2b text generation through a Gradio web UI.

Loads the model with 8-bit (bitsandbytes) weight quantization and
bfloat16 compute, then exposes it via ``gr.Interface.launch()``.
"""

from transformers import BitsAndBytesConfig, pipeline
import gradio as gr
import torch

# 8-bit weight quantization roughly halves memory versus bf16 weights.
# BitsAndBytesConfig was previously referenced without being imported,
# which raised a NameError at startup.
quantization = BitsAndBytesConfig(load_in_8bit=True)

# Named `text_gen` (not `pipeline`) so the imported factory is not shadowed.
text_gen = pipeline(
    "text-generation",
    model="google/gemma-2-2b",
    torch_dtype=torch.bfloat16,
    device_map="auto",  # let accelerate place layers on available devices
    model_kwargs={"quantization_config": quantization},
)

if __name__ == "__main__":
    # Build a simple Gradio demo directly from the pipeline and serve it.
    gr.Interface.from_pipeline(text_gen).launch()