import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import gradio as gr

# Model identifiers
base_model_name = "unsloth/gemma-3-12b-it-unsloth-bnb-4bit"
adapter_name = "adarsh3601/my_gemma3_pt"

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base model in 4-bit with float16 compute.
# The "-bnb-4bit" checkpoint ships pre-quantized, and device_map="auto"
# places the weights on the available GPU(s).
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    device_map="auto",
    torch_dtype=torch.float16,
    load_in_4bit=True,
)

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Attach the fine-tuned adapter.
# Note: do not call model.to(device) here; 4-bit bitsandbytes models cannot be
# moved with .to(), and device_map="auto" has already handled placement.
model = PeftModel.from_pretrained(base_model, adapter_name)
model.eval()

# Chat function
def chat(message):
    try:
        # Tokenize the input; keep input_ids as integer tensors (do NOT call .half())
        inputs = tokenizer(message, return_tensors="pt").to(device)

        # Generate a response
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7,
            top_p=0.95,
        )

        # Decode only the newly generated tokens, not the echoed prompt
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        response = tokenizer.decode(new_tokens, skip_special_tokens=True)
        return response

    except Exception as e:
        print("Unexpected error:", e)
        return "An error occurred during generation."

# Launch the Gradio interface
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="Gemma Chatbot")
iface.launch()
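
# --- Optional prompt formatting (illustrative sketch, not part of the original script) ---
# Gemma instruction-tuned checkpoints are trained on a chat format; passing the raw
# string works, but wrapping it with the tokenizer's chat template usually yields
# better replies. The helper below is an assumption shown for illustration only:
#
# def build_inputs(message):
#     # Wrap the user message in the chat format the model was tuned on
#     messages = [{"role": "user", "content": message}]
#     return tokenizer.apply_chat_template(
#         messages,
#         add_generation_prompt=True,
#         return_tensors="pt",
#     ).to(device)
#
# Inside chat(), the generate call could then use input_ids=build_inputs(message)
# instead of the plain tokenizer(...) output.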