ruslanmv committed on
Commit
d646df8
1 Parent(s): ff24809

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -17
app.py CHANGED
@@ -30,23 +30,14 @@ def askme(symptoms, question):
30
  # Remove system messages and content
31
  # Extract and return the generated text, removing the prompt
32
  # Extract only the assistant's response
33
- answer = response_text.split('<|im_start|>assistant')[-1].strip()
34
- #answer =response_text.split("assistant")[1].strip().split("user")[0].strip()
35
- return answer
36
-
37
-
38
- def askmeold(symptoms, question):
39
- template = [
40
- {"role": "system", "content": "You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and provide an informative answer. If you don't know the answer to a specific medical inquiry, advise seeking professional help."},
41
- {"role": "user", "content": f"Symptoms: {symptoms}\nQuestion: {question}\n"},
42
- {"role": "assistant", "content": "{assistant_response}\n"}
43
- ]
44
-
45
- prompt = tokenizer.apply_chat_template(template, tokenize=False, add_generation_prompt=True)
46
- inputs = tokenizer(prompt, return_tensors="pt").to(device)
47
- outputs = model.generate(**inputs, max_new_tokens=300, use_cache=True)
48
- response_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0].strip()
49
- return response_text
50
 
51
 
52
  # Example usage
 
30
  # Remove system messages and content
31
  # Extract and return the generated text, removing the prompt
32
  # Extract only the assistant's response
33
+ #answer = response_text.split('<|im_start|>assistant')[-1].strip()
34
+ start_idx = response_text.find("<|im_start|>assistant")
35
+ end_idx = response_text.find("<|im_end|>", start_idx)
36
+ assistant_response = response_text[start_idx + len("<|im_start|>assistant"):end_idx]
37
+
38
+ return assistant_response.split(". ")[0] + "."
39
+
40
+
 
 
 
 
 
 
 
 
 
41
 
42
 
43
  # Example usage