import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import gradio as gr

# Model and device setup
base_model_name = "unsloth/gemma-3-12b-it-unsloth-bnb-4bit"
adapter_name = "adarsh3601/my_gemma3_pt"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load base model with 4-bit quantization
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    device_map={"": device},
    torch_dtype=torch.float16,  # Keep float16 unless it breaks
    load_in_4bit=True
)

# Load tokenizer and adapter
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(base_model, adapter_name)
# device_map above already places the quantized weights on `device`; calling
# .to() on a 4-bit bitsandbytes model is redundant and can error on newer
# transformers versions, so it is omitted here.

# Chat function with stability safeguards
def chat(message):
    if not message or not message.strip():
        return "Please enter a valid message."

    inputs = tokenizer(message, return_tensors="pt")

    # Safely move to device; only convert float tensors to half
    # (input_ids and attention_mask are integer tensors and stay as-is)
    for k in inputs:
        if inputs[k].dtype == torch.float32:
            inputs[k] = inputs[k].to(device).half()
        else:
            inputs[k] = inputs[k].to(device)

    try:
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=150,
                do_sample=True,
                top_k=50,
                top_p=0.95,
                temperature=0.8
            )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response
    except RuntimeError as e:
        return f"An error occurred during generation: {str(e)}"

# Launch Gradio app
iface = gr.Interface(
    fn=chat,
    inputs="text",
    outputs="text",
    title="Gemma Chatbot"
)
iface.launch()
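
# Optional: newer transformers releases deprecate passing load_in_4bit directly
# to from_pretrained in favor of an explicit BitsAndBytesConfig. Below is a
# minimal sketch of the equivalent load under that assumption (same model name
# and device as above); it is left commented out so the script does not load
# the model twice.
#
# from transformers import BitsAndBytesConfig
#
# bnb_config = BitsAndBytesConfig(
#     load_in_4bit=True,
#     bnb_4bit_compute_dtype=torch.float16,  # match the float16 compute above
# )
# base_model = AutoModelForCausalLM.from_pretrained(
#     base_model_name,
#     device_map={"": device},
#     quantization_config=bnb_config,
# )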