Spaces:
Runtime error
Runtime error
bigmed@bigmed
commited on
Commit
·
655d0c0
1
Parent(s):
97a044f
upload vqa demo
Browse files- .idea/.gitignore +3 -0
- .idea/VQA_Demo.iml +8 -0
- .idea/inspectionProfiles/Project_Default.xml +12 -0
- .idea/inspectionProfiles/profiles_settings.xml +6 -0
- .idea/misc.xml +4 -0
- .idea/modules.xml +8 -0
- .idea/vcs.xml +6 -0
- MED_VQA_Huggyface_Gradio.py +46 -0
- cats.jpg +0 -0
- flagged/image/tmp6px7agq4.jpg +0 -0
- flagged/log.csv +2 -0
- requirements.txt +2 -0
.idea/.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
# Default ignored files
|
2 |
+
/shelf/
|
3 |
+
/workspace.xml
|
.idea/VQA_Demo.iml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<module type="PYTHON_MODULE" version="4">
|
3 |
+
<component name="NewModuleRootManager">
|
4 |
+
<content url="file://$MODULE_DIR$" />
|
5 |
+
<orderEntry type="jdk" jdkName="bigmed" jdkType="Python SDK" />
|
6 |
+
<orderEntry type="sourceFolder" forTests="false" />
|
7 |
+
</component>
|
8 |
+
</module>
|
.idea/inspectionProfiles/Project_Default.xml
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<component name="InspectionProjectProfileManager">
|
2 |
+
<profile version="1.0">
|
3 |
+
<option name="myName" value="Project Default" />
|
4 |
+
<inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
|
5 |
+
<option name="ignoredErrors">
|
6 |
+
<list>
|
7 |
+
<option value="N806" />
|
8 |
+
</list>
|
9 |
+
</option>
|
10 |
+
</inspection_tool>
|
11 |
+
</profile>
|
12 |
+
</component>
|
.idea/inspectionProfiles/profiles_settings.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<component name="InspectionProjectProfileManager">
|
2 |
+
<settings>
|
3 |
+
<option name="USE_PROJECT_PROFILE" value="false" />
|
4 |
+
<version value="1.0" />
|
5 |
+
</settings>
|
6 |
+
</component>
|
.idea/misc.xml
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="ProjectRootManager" version="2" project-jdk-name="bigmed" project-jdk-type="Python SDK" />
|
4 |
+
</project>
|
.idea/modules.xml
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="ProjectModuleManager">
|
4 |
+
<modules>
|
5 |
+
<module fileurl="file://$PROJECT_DIR$/.idea/VQA_Demo.iml" filepath="$PROJECT_DIR$/.idea/VQA_Demo.iml" />
|
6 |
+
</modules>
|
7 |
+
</component>
|
8 |
+
</project>
|
.idea/vcs.xml
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
2 |
+
<project version="4">
|
3 |
+
<component name="VcsDirectoryMappings">
|
4 |
+
<mapping directory="$PROJECT_DIR$/.." vcs="Git" />
|
5 |
+
</component>
|
6 |
+
</project>
|
MED_VQA_Huggyface_Gradio.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
##### VQA MED Demo
|
2 |
+
|
3 |
+
import gradio as gr
|
4 |
+
from transformers import ViltProcessor, ViltForQuestionAnswering
|
5 |
+
import torch
|
6 |
+
|
7 |
+
torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
|
8 |
+
|
9 |
+
processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
|
10 |
+
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
|
11 |
+
|
12 |
+
|
13 |
+
def answer_question(image, text):
    """Answer a free-text question about an image with the ViLT VQA model.

    Args:
        image: PIL image supplied by the Gradio image component.
        text: the question string.

    Returns:
        The label string of the highest-scoring answer class.
    """
    # Turn the (image, question) pair into model-ready tensors.
    encoding = processor(image, text, return_tensors="pt")

    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        outputs = model(**encoding)

    # Highest-scoring answer class, mapped back to its human-readable label.
    best_idx = outputs.logits.argmax(-1).item()
    return model.config.id2label[best_idx]
25 |
+
|
26 |
+
|
27 |
+
# --- Gradio UI wiring ------------------------------------------------------
# FIX: the legacy `gr.inputs.*` / `gr.outputs.*` namespaces and the
# `enable_queue=` kwarg were removed in Gradio 3.x. With gradio unpinned,
# the Space installs a current release and this section raised
# AttributeError at import time (the Space's "Runtime error"). Use the
# top-level component classes and an explicit .queue() call instead.
image = gr.Image(type="pil")
question = gr.Textbox(label="Question")
answer = gr.Textbox(label="Predicted answer")
# Example row: local file downloaded at startup plus a sample question.
examples = [["cats.jpg", "How many cats are there?"]]

title = "Interactive Visual Question Answering demo(BigMed@ai: Artificial Intelligence for Large-Scale Medical Image Analysis)"
description = "Gradio Demo for VQA medical model trained on PathVQA dataset, To use it, upload your image and type a question and click 'submit', or click one of the examples to load them."

### link to paper and github code
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2102.03334' target='_blank'>BigMed@ai</a> | <a href='https://github.com/dandelin/ViLT' target='_blank'>Github Repo</a></p>"

interface = gr.Interface(fn=answer_question,
                         inputs=[image, question],
                         outputs=answer,
                         examples=examples,
                         title=title,
                         description=description,
                         article=article)
# Request queueing (replacement for the removed enable_queue=True kwarg).
interface.queue()
interface.launch(debug=True)
cats.jpg
ADDED
![]() |
flagged/image/tmp6px7agq4.jpg
ADDED
![]() |
flagged/log.csv
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
image,Question,Predicted answer,flag,username,timestamp
|
2 |
+
D:\2023\BigMed_Demos\VQA_Demo\flagged\image\tmp6px7agq4.jpg,How many cats are there?,2,,,2022-12-26 01:49:33.791750
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
torch == 1.13.1
|
2 |
+
transformers == 4.25.1
gradio == 3.50.2
|