ruslanmv committed on
Commit
398ee6b
1 Parent(s): 184a052

First commit

Browse files
Files changed (2) hide show
  1. app.py +69 -0
  2. requirements.txt +8 -0
app.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
3
+ import torch
4
+ model_name = "ruslanmv/Medical-Llama3-8B"
5
+ device_map = 'auto'
6
+ # Check if GPU is available
7
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
8
+ print(f"Using device: {device}")
9
+
10
+ if device.type == "cuda":
11
+ bnb_config = BitsAndBytesConfig(
12
+ load_in_4bit=True,
13
+ bnb_4bit_quant_type="nf4",
14
+ bnb_4bit_compute_dtype=torch.float16,
15
+ )
16
+ model = AutoModelForCausalLM.from_pretrained(
17
+ model_name,
18
+ quantization_config=bnb_config,
19
+ trust_remote_code=True,
20
+ use_cache=False,
21
+ device_map=device_map
22
+ )
23
+ else:
24
+ model = AutoModelForCausalLM.from_pretrained(
25
+ model_name,
26
+ trust_remote_code=True,
27
+ use_cache=False
28
+ )
29
+ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
30
+ tokenizer.pad_token = tokenizer.eos_token
31
+
32
+ def askme(symptoms, question):
33
+ sys_message = '''
34
+ You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and
35
+ provide an informative answer. If you don't know the answer to a specific medical inquiry, advise seeking professional help.
36
+ '''
37
+ content = symptoms + " " + question
38
+ messages = [{"role": "system", "content": sys_message}, {"role": "user", "content": content}]
39
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
40
+ inputs = tokenizer(prompt, return_tensors="pt").to(device)
41
+ outputs = model.generate(**inputs, max_new_tokens=200, use_cache=True)
42
+ response_text = tokenizer.batch_decode(outputs)[0].strip()
43
+ answer = response_text.split('<|im_start|>assistant')[-1].strip()
44
+ return answer
45
+
46
+ # Example usage
47
+ symptoms = '''
48
+ I'm a 35-year-old male and for the past few months, I've been experiencing fatigue,
49
+ increased sensitivity to cold, and dry, itchy skin.
50
+ '''
51
+ question = '''
52
+ Could these symptoms be related to hypothyroidism?
53
+ If so, what steps should I take to get a proper diagnosis and discuss treatment options?
54
+ '''
55
+
56
+ examples = [
57
+ [symptoms, question]
58
+ ]
59
+
60
+ iface = gr.Interface(
61
+ fn=askme,
62
+ inputs=["text", "text"],
63
+ outputs="text",
64
+ examples=examples,
65
+ title="Medical AI Chatbot",
66
+ description="Ask me a medical question!"
67
+ )
68
+
69
+ iface.launch()
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ torch==2.2.1
2
+ torchvision
3
+ torchaudio
4
+ xformers
5
+ bitsandbytes
6
+ accelerate
7
+ gradio
8
+ transformers