ruslanmv committed on
Commit
ff24809
1 Parent(s): aac8422

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -8
app.py CHANGED
@@ -17,6 +17,25 @@ tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
17
  tokenizer.pad_token = tokenizer.eos_token
18
  @spaces.GPU
19
  def askme(symptoms, question):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  template = [
21
  {"role": "system", "content": "You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and provide an informative answer. If you don't know the answer to a specific medical inquiry, advise seeking professional help."},
22
  {"role": "user", "content": f"Symptoms: {symptoms}\nQuestion: {question}\n"},
@@ -30,14 +49,6 @@ def askme(symptoms, question):
30
  return response_text
31
 
32
 
33
- examples = [
34
- ["headache", "What are the possible causes of a headache?"],
35
- ["fever", "How can I treat a fever at home?"],
36
- ["cough", "What are the symptoms of a cough?"],
37
- ["chest pain", "What are the possible causes of chest pain?"],
38
- ]
39
-
40
-
41
  # Example usage
42
  symptoms = '''\
43
  I'm a 35-year-old male and for the past few months, I've been experiencing fatigue,
 
17
  tokenizer.pad_token = tokenizer.eos_token
18
@spaces.GPU
def askme(symptoms, question):
    """Generate a medical answer for the given symptoms and question.

    Builds a chat prompt (system message + user content), runs the model,
    and returns only the newly generated assistant text.

    Args:
        symptoms: Free-text description of the patient's symptoms.
        question: The medical question to answer.

    Returns:
        The model's generated answer as a plain string, with the prompt
        and special tokens removed.
    """
    sys_message = '''\
You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and
provide an informative answer. If you don't know the answer to a specific medical inquiry, advise seeking professional help.
'''
    content = symptoms + " " + question
    messages = [{"role": "system", "content": sys_message}, {"role": "user", "content": content}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt").to(device)  # move tensors to the model's device
    outputs = model.generate(**inputs, max_new_tokens=200, use_cache=True)
    # Decode ONLY the newly generated tokens by slicing at the prompt length.
    # The previous approach decoded the whole sequence and split on the literal
    # '<|im_start|>assistant' marker — but with skip_special_tokens=True that
    # marker is usually stripped during decoding, so the split silently returned
    # the entire prompt+answer. Slicing by input length is template-agnostic.
    prompt_len = inputs["input_ids"].shape[1]
    answer = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()
    return answer
36
+
37
+
38
+ def askmeold(symptoms, question):
39
  template = [
40
  {"role": "system", "content": "You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and provide an informative answer. If you don't know the answer to a specific medical inquiry, advise seeking professional help."},
41
  {"role": "user", "content": f"Symptoms: {symptoms}\nQuestion: {question}\n"},
 
49
  return response_text
50
 
51
 
 
 
 
 
 
 
 
 
52
  # Example usage
53
  symptoms = '''\
54
  I'm a 35-year-old male and for the past few months, I've been experiencing fatigue,