bigmed@bigmed committed
Commit 559a5de
1 parent: ca864fe

Published paper had to be added to the page

Files changed (3):
  1. CLIP/clip.py +1 -1
  2. MED_VQA_Huggyface_Gradio.py +9 -9
  3. README.md +1 -1
CLIP/clip.py CHANGED
@@ -119,7 +119,7 @@ def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_a
         state_dict = torch.load(model_path, map_location="cpu")
 
     if not jit:
-        print("Heree.....")
+        # print("Heree.....")
         model = build_model(state_dict or model.state_dict()).to(device)
         if str(device) == "cpu":
             model.float()
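For context, the `state_dict or model.state_dict()` line kept in this hunk is CLIP's fallback between the two checkpoint formats its `load()` accepts. A minimal sketch of that flow, assuming the standard CLIP loading logic (the `load_weights` helper and checkpoint path are illustrative, not part of this repo):

```python
import torch

def load_weights(model_path: str):
    # Hypothetical helper sketching CLIP's load() fallback:
    # the checkpoint is first tried as a TorchScript archive,
    # then as a plain state dict.
    try:
        jit_model = torch.jit.load(model_path, map_location="cpu").eval()
        state_dict = None
    except RuntimeError:
        jit_model = None
        state_dict = torch.load(model_path, map_location="cpu")

    # Mirrors the kept line: when state_dict is None (JIT archive),
    # fall back to the archive's own weights.
    return state_dict or jit_model.state_dict()
```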
MED_VQA_Huggyface_Gradio.py CHANGED
@@ -155,9 +155,9 @@ def infer_answer_question(image, text):
     return cap_result
 
 
-image = gr.inputs.Image(type="pil")
-question = gr.inputs.Textbox(label="Question")
-answer = gr.outputs.Textbox(label="Predicted answer")
+image = gr.Image(type="pil")
+question = gr.Textbox(label="Question")
+answer = gr.Textbox(label="Predicted answer")
 examples = [["train_0000.jpg", "Where are liver stem cells (oval cells) located?"],
             ["train_0001.jpg", "What are stained here with an immunohistochemical stain for cytokeratin 7?"],
             ["train_0002.jpg", "What are bile duct cells and canals of Hering stained here with for cytokeratin 7?"],
@@ -165,10 +165,11 @@ examples = [["train_0000.jpg", "Where are liver stem cells (oval cells) located?"],
             ["train_0018.jpg", "Is there an infarct in the brain hypertrophy?"],
             ["train_0019.jpg", "What is ischemic coagulative necrosis?"]]
 
-title = "Interactive Visual Question Answering demo(BigMed@ai: Artificial Intelligence for Large-Scale Medical Image Analysis)"
-description = "<div style='display: flex;align-items: center;justify-content: space-between;'><p style='width:60vw;'>Gradio Demo for VQA medical model trained on PathVQA dataset, To use it, upload your image and type a question and click 'submit', or click one of the examples to load them.</p><a href='https://github.com/dandelin/ViLT' target='_blank' class='link'><img src='file/GitHub.png' style='justify-self:margin-top:0.5em;center; width:calc(200px + 5vw);'></a></div>"
+title = "Vision–Language Model for Visual Question Answering in Medical Imagery"
+description = "Gradio Demo for VQA medical model trained on PathVQA dataset, To use it, upload your image and type a question and click 'submit', or click one of the examples to load them.<br>" \
+              "You can access the paper <a href='https://www.mdpi.com/2306-5354/10/3/380' target='_blank'> Vision–Language Model for Visual Question Answering in Medical Imagery</a>. Y Bazi, MMA Rahhal, L Bashmal, M Zuair - Bioengineering, 2023"
 ### link to paper and github code
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2102.03334' target='_blank'>BigMed@ai</a> | <a href='https://github.com/dandelin/ViLT' target='_blank'>Github Repo</a></p>"
+article = "<p style='text-align: center'><a href='https://www.mdpi.com/2306-5354/10/3/380' target='_blank'>BigMed@ai</a></p>"
 
 interface = gr.Interface(fn=infer_answer_question,
                          inputs=[image, question],
@@ -176,6 +177,5 @@ interface = gr.Interface(fn=infer_answer_question,
                          examples=examples,
                          title=title,
                          description=description,
-                         article=article,
-                         enable_queue=True)
-interface.launch(debug=True)
+                         article=article)
+interface.launch(debug=True, enable_queue=True)
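Besides swapping in the paper links, these hunks track Gradio's API migration: components moved from the `gr.inputs`/`gr.outputs` namespaces to the top level, and `enable_queue` moved from the `Interface` constructor to `launch()`. A minimal runnable sketch of the updated pattern, with a stub in place of the Space's actual model call:

```python
import gradio as gr

def infer_answer_question(image, text):
    # Stub standing in for the Space's CLIP-based VQA model;
    # the real function encodes the image/question pair and decodes an answer.
    return f"(stub) answer for: {text!r}"

# Gradio 3.x style: components at the top level, not gr.inputs/gr.outputs.
image = gr.Image(type="pil")
question = gr.Textbox(label="Question")
answer = gr.Textbox(label="Predicted answer")

demo = gr.Interface(fn=infer_answer_question,
                    inputs=[image, question],
                    outputs=answer,
                    title="Visual Question Answering demo")

# enable_queue was accepted by launch() in Gradio 3.x; later releases
# queue by default (or explicitly via demo.queue()).
demo.launch(debug=True, enable_queue=True)
```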
 
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: VQA Demo
+title: Visual Question Answering in Medical Imagery
 emoji: 🧑‍⚕️
 colorFrom: yellow
 colorTo: red
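This header is the YAML front matter that configures a Hugging Face Space; `title` sets the Space's display name, so the rename here matches the paper title used in the demo.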