import os

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Hugging Face Spaces environment flags
IS_SPACES_ZERO = os.environ.get("SPACES_ZERO_GPU", "0") == "1"
IS_SPACE = os.environ.get("SPACE_ID", None) is not None

LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"
print(f"low memory: {LOW_MEMORY}")

device = "cuda"
model_name = "ruslanmv/Medical-Llama3-8B"

# Move the model to the CUDA device; the tokenizer stays on the CPU
# (tokenizers have no .to() method).
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token


@spaces.GPU
def askme(symptoms, question):
    sys_message = '''\
You are an AI Medical Assistant trained on a vast dataset of health information.
Please be thorough and provide an informative answer. If you don't know the answer
to a specific medical inquiry, advise seeking professional help.
'''
    content = symptoms + " " + question
    messages = [
        {"role": "system", "content": sys_message},
        {"role": "user", "content": content},
    ]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt").to(device)  # ensure input tensors are on the CUDA device
    outputs = model.generate(**inputs, max_new_tokens=200, use_cache=True)
    response_text = tokenizer.batch_decode(outputs)[0].strip()
    # Keep only the assistant's turn from the decoded chat transcript
    answer = response_text.split('<|im_start|>assistant')[-1].strip()
    return answer


# Example usage
symptoms = '''\
I'm a 35-year-old male and for the past few months, I've been experiencing fatigue,
increased sensitivity to cold, and dry, itchy skin.
'''
question = '''\
Could these symptoms be related to hypothyroidism? If so, what steps should I take
to get a proper diagnosis and discuss treatment options?
'''

examples = [[symptoms, question]]

iface = gr.Interface(
    fn=askme,
    inputs=["text", "text"],
    outputs="text",
    examples=examples,
    title="Medical AI Chatbot",
    description="Ask me a medical question!",
)

iface.launch()
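
# --- Optional low-memory loading sketch ---
# LOW_MEMORY and the BitsAndBytesConfig import above are defined but never used.
# A minimal sketch of how they could be wired together, assuming the bitsandbytes
# package is installed; this is an illustrative alternative, not part of the
# original app logic:
#
#   if LOW_MEMORY:
#       bnb_config = BitsAndBytesConfig(
#           load_in_4bit=True,
#           bnb_4bit_compute_dtype=torch.float16,
#       )
#       model = AutoModelForCausalLM.from_pretrained(
#           model_name,
#           quantization_config=bnb_config,
#           device_map="auto",  # let accelerate place layers; replaces .to(device)
#       )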