bigmed@bigmed committed on
Commit
f5835dc
1 Parent(s): 559a5de

rearrange the description

Browse files
Files changed (1) hide show
  1. MED_VQA_Huggyface_Gradio.py +2 -2
MED_VQA_Huggyface_Gradio.py CHANGED
@@ -166,8 +166,8 @@ examples = [["train_0000.jpg", "Where are liver stem cells (oval cells) located?
166
  ["train_0019.jpg", "What is ischemic coagulative necrosis?"]]
167
 
168
  title = "Vision–Language Model for Visual Question Answering in Medical Imagery"
169
- description = "Gradio Demo for VQA medical model trained on PathVQA dataset, To use it, upload your image and type a question and click 'submit', or click one of the examples to load them.<br>" \
170
- "You can access the paper <a href='https://www.mdpi.com/2306-5354/10/3/380' target='_blank'> Vision–Language Model for Visual Question Answering in Medical Imagery</a>. Y Bazi, MMA Rahhal, L Bashmal, M Zuair - Bioengineering, 2023"
171
  ### link to paper and github code
172
  article = "<p style='text-align: center'><a href='https://www.mdpi.com/2306-5354/10/3/380' target='_blank'>BigMed@ai</a></p>"
173
 
 
166
  ["train_0019.jpg", "What is ischemic coagulative necrosis?"]]
167
 
168
  title = "Vision–Language Model for Visual Question Answering in Medical Imagery"
169
+ description = "Y Bazi, MMA Rahhal, L Bashmal, M Zuair. <a href='https://www.mdpi.com/2306-5354/10/3/380' target='_blank'> Vision–Language Model for Visual Question Answering in Medical Imagery</a>. Bioengineering, 2023<br><br>"\
170
 "Gradio Demo for a VQA medical model trained on the PathVQA dataset. To use it, upload your image, type a question, and click 'submit', or click one of the examples to load them." \
171
  ### link to paper and github code
172
  article = "<p style='text-align: center'><a href='https://www.mdpi.com/2306-5354/10/3/380' target='_blank'>BigMed@ai</a></p>"
173