import os

import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Model loading
base_model_name = "adarsh3601/my_gemma_pt3"
adapter_name = "adarsh3601/my_gemma3_pt"

device = "cuda" if torch.cuda.is_available() else "cpu"

# Make sure the Hugging Face token is set as an environment variable (HF_AUTH_TOKEN)
auth_token = os.getenv("HF_AUTH_TOKEN")

# Load tokenizer and 4-bit quantized base model
# (4-bit loading requires a CUDA device and the bitsandbytes package)
tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=auth_token)

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    device_map={"": device},
    torch_dtype=torch.float16,
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    token=auth_token,
)

# Attach the LoRA adapter on top of the quantized base model.
# The model is already placed via device_map, so no extra .to(device) call is needed
# (calling .to() on a 4-bit bitsandbytes model raises an error).
model = PeftModel.from_pretrained(base_model, adapter_name)
model.eval()


# Chat function
def chat(message):
    inputs = tokenizer(message, return_tensors="pt")
    # Move tensors to the model's device; keep input_ids / attention_mask as integer
    # tensors (casting them to half precision would break the embedding lookup).
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=150, do_sample=True)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


# Launch Gradio app
iface = gr.Interface(fn=chat, inputs="text", outputs="text", title="Gemma Chatbot")
iface.launch()
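
# Example usage (a minimal sketch; the filename app.py and the token value are
# illustrative assumptions, not part of the original script):
#
#   export HF_AUTH_TOKEN=hf_xxx   # your Hugging Face access token
#   python app.py                 # Gradio serves the UI, by default at http://127.0.0.1:7860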