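"""Streamlit app for Knowledge-Based Visual Question Answering (KB-VQA).

Sidebar pages: Home, Dataset Analysis, Evaluation Results, Run Inference
(captioning, object detection, and question answering over a selected or
uploaded image), and the Dissertation Report.
"""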
import copy

import streamlit as st
import torch
import torch.nn as nn
import bitsandbytes
import accelerate
import scipy
from PIL import Image

from my_model.object_detection import detect_and_draw_objects
from my_model.captioner.image_captioning import get_caption
from my_model.utilities import free_gpu_resources
from my_model.KBVQA import KBVQA, prepare_kbvqa_model
import my_model.utilities.st_config as st_config


class ImageHandler:
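    """Static helpers for captioning and object detection on the currently selected image."""
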
    @staticmethod
    def analyze_image(image, model, show_processed_image=False):
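        """Caption the image and detect objects on a copy of it, optionally showing the annotated image; returns (caption, detected_objects_str)."""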
        img = copy.deepcopy(image)
        caption = model.get_caption(img)
        image_with_boxes, detected_objects_str = model.detect_objects(img)
        if show_processed_image:
            st.image(image_with_boxes)
        return caption, detected_objects_str

    @staticmethod
    def free_gpu_resources():
        """Release GPU memory via the project's free_gpu_resources utility."""
        free_gpu_resources()

class QuestionAnswering:
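    """Static helper that answers a question using the caption and detected objects."""
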
    @staticmethod
    def answer_question(image, question, caption, detected_objects_str, model):
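        """Generate an answer with the KBVQA model and display the image, caption, and detected objects."""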
        answer = model.generate_answer(question, caption, detected_objects_str)
        st.image(image)
        st.write(caption)
        st.write("----------------")
        st.write(detected_objects_str)
        return answer

class UIComponents:
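    """Reusable Streamlit UI components."""
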
    @staticmethod
    def display_image_selection(sample_images):
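        """Show the sample images side by side; selecting one stores it in session state and resets the QA state."""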
        cols = st.columns(len(sample_images))
        for idx, sample_image_path in enumerate(sample_images):
            with cols[idx]:
                image = Image.open(sample_image_path)
                st.image(image, use_column_width=True)
                if st.button(f'Select Sample Image {idx + 1}', key=f'sample_{idx}'):
                    st.session_state['current_image'] = image
                    st.session_state['qa_history'] = []
                    st.session_state['analysis_done'] = False
                    st.session_state['answer_in_progress'] = False

def load_kbvqa_model(detection_model):
    """Load the KBVQA model for the selected detection model, reusing a cached instance if one exists."""
    if st.session_state.get('kbvqa') is not None:
        st.write("Model already loaded.")
        return True
    st.session_state['kbvqa'] = prepare_kbvqa_model(detection_model)
    if st.session_state['kbvqa']:
        st.write("Model is ready for inference.")
        return True
    return False

def set_model_confidence(detection_model):
    """Set the confidence level for the detection model."""
    default_confidence = 0.2 if detection_model == "yolov5" else 0.4
    confidence_level = st.slider(
        "Select Detection Confidence Level",
        min_value=0.1,
        max_value=0.9,
        value=default_confidence,
        step=0.1
    )
    st.session_state['kbvqa'].detection_confidence = confidence_level

def image_qa_app(kbvqa_model):
    """Streamlit app interface for image QA."""
    sample_images = st_config.SAMPLE_IMAGES
    UIComponents.display_image_selection(sample_images)

    uploaded_image = st.file_uploader("Or upload an Image", type=["png", "jpg", "jpeg"])
    if uploaded_image is not None:
        st.session_state['current_image'] = Image.open(uploaded_image)
        st.session_state['qa_history'] = []
        st.session_state['analysis_done'] = False
        st.session_state['answer_in_progress'] = False

    if st.session_state.get('current_image') and not st.session_state.get('analysis_done', False):
        if st.button('Analyze Image'):
            caption, detected_objects_str = ImageHandler.analyze_image(st.session_state['current_image'], kbvqa_model)
            st.session_state['caption'] = caption
            st.session_state['detected_objects_str'] = detected_objects_str
            st.session_state['analysis_done'] = True

    if st.session_state.get('analysis_done', False):
        question = st.text_input("Ask a question about this image:")
        if st.button('Get Answer'):
            answer = QuestionAnswering.answer_question(
                st.session_state['current_image'],
                question, 
                st.session_state.get('caption', ''), 
                st.session_state.get('detected_objects_str', ''), 
                kbvqa_model
            )
            st.session_state['qa_history'].append((question, answer))

        for q, a in st.session_state.get('qa_history', []):
            st.text(f"Q: {q}\nA: {a}\n")

def run_inference():
    """Main function to run inference based on the selected method."""
    st.title("Run Inference")

    method = st.selectbox(
        "Choose a method:",
        ["Fine-Tuned Model", "In-Context Learning (n-shots)"],
        index=0
    )

    if method == "Fine-Tuned Model":
        detection_model = st.selectbox(
            "Choose a model for object detection:",
            ["yolov5", "detic"],
            index=0
        )

        # Reset the cached model whenever the detection backend changes.
        if st.session_state.get('detection_model') != detection_model:
            st.session_state.pop('kbvqa', None)
            st.session_state['detection_model'] = detection_model

        # Keep the QA interface rendered on every rerun once the model is available.
        if load_kbvqa_model(detection_model):
            set_model_confidence(detection_model)
            image_qa_app(st.session_state['kbvqa'])


def main():
    st.sidebar.title("Navigation")
    selection = st.sidebar.radio("Go to", ["Home", "Dataset Analysis", "Evaluation Results", "Run Inference", "Dissertation Report"])

    if selection == "Home":
        st.title("MultiModal Learning for Knowledge-Based Visual Question Answering")
        st.write("Home page content goes here...")

    elif selection == "Dissertation Report":
        st.title("Dissertation Report")
        st.write("Click the link below to view the PDF.")
        # Example to display a link to a PDF
        st.download_button(
            label="Download PDF",
            data=open("Files/Dissertation Report.pdf", "rb"),
            file_name="example.pdf",
            mime="application/octet-stream"
        )

    elif selection == "Evaluation Results":
        st.title("Evaluation Results")
        st.write("This is a Place Holder until the contents are uploaded.")

    elif selection == "Dataset Analysis":
        st.title("OK-VQA Dataset Analysis")
        st.write("This is a Place Holder until the contents are uploaded.")

    elif selection == "Run Inference":
        run_inference()


if __name__ == "__main__":
    main()