import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel
import gradio as gr

# Model and adapter identifiers
base_model_name = "unsloth/gemma-3-12b-it-unsloth-bnb-4bit"
adapter_name = "adarsh3601/my_gemma3_pt"

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base model in 4-bit. Passing load_in_4bit=True directly to
# from_pretrained is deprecated; BitsAndBytesConfig is the supported route.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    device_map={"": device},
    torch_dtype=torch.float16,
    quantization_config=bnb_config,
)

# Load tokenizer and attach the LoRA adapter
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(base_model, adapter_name)
# Note: do not call model.to(device) here -- .to() is not supported for
# 4-bit quantized models, and device_map has already placed the weights.

# Chat function
def chat(message):
    inputs = tokenizer(message, return_tensors="pt")
    # Tokenizer outputs (input_ids, attention_mask) are integer tensors,
    # so they only need to be moved to the model's device.
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response

# Launch Gradio app
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="Gemma Chatbot")
iface.launch()
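
# Optional variant (an assumption, not part of the original app): Gemma
# instruction-tuned checkpoints are trained on a chat format, so wrapping the
# user message with tokenizer.apply_chat_template usually produces better
# replies than passing raw text. chat_with_template below is a hypothetical
# alternative to chat(); it is shown after the launch call purely as a
# reference sketch, and could be wired into gr.Interface(fn=...) instead.
def chat_with_template(message):
    messages = [{"role": "user", "content": message}]
    # Build the prompt with the model's own chat template and move it to the device.
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(device)
    outputs = model.generate(input_ids, max_new_tokens=150, do_sample=True)
    # Decode only the newly generated tokens, skipping the echoed prompt.
    return tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)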