# main application file initializing the gradio based ui and calling other components

# standard imports
import os

# external imports
from fastapi import FastAPI
import markdown
import gradio as gr
from uvicorn import run
from gradio_iframe import iFrame

# internal imports
from backend.controller import interference
from explanation.markup import color_codes


# global variables and js/css
# creating FastAPI app and getting color codes
app = FastAPI()
coloring = color_codes()


# defining custom css and js for certain environments
css = """
    .examples {text-align: start;}
    .seperatedRow {border-top: 1rem solid;}
    """
# custom js to force light mode outside of hgf spaces
if os.environ.get("HOSTING", "").lower() != "spaces":
    js = """
    function () {
        gradioURL = window.location.href
        if (!gradioURL.endsWith('?__theme=light')) {
        window.location.replace(gradioURL + '?__theme=light');
        }
    }
    """
else:
    js = ""


# different functions to provide frontend abilities
# function to load markdown files
def load_md(path):
    # CREDIT: official python-markdown documentation
    ## see https://python-markdown.github.io/reference/
    with open(path, "r", encoding="utf-8") as file:
        text = file.read()
    return markdown.markdown(text)


# function to display the system prompt info
def system_prompt_info(sys_prompt_txt):
    if sys_prompt_txt == "":
        sys_prompt_txt = """
            You are a helpful, respectful and honest assistant.
            Always answer as helpfully as possible, while being safe.
        """

    # display the system prompt using the Gradio Info component
    gr.Info(f"The system prompt was set to:\n {sys_prompt_txt}")


# function to display the xai info
def xai_info(xai_radio):
    # display the xai method using the Gradio Info component
    if xai_radio != "None":
        gr.Info(f"The XAI was set to:\n {xai_radio}")
    else:
        gr.Info("No XAI method was selected.")


# function to display the model info
def model_info(model_radio):
    # displays the selected model using the Gradio Info component
    gr.Info(f"The following model was selected:\n {model_radio}")


# ui layout based on Gradio Blocks
# see https://www.gradio.app/docs/interface
with gr.Blocks(
    css=css,
    js=js,
    title="Thesis Webapp Showcase",
    head="<head>",
) as ui:
    # header row with markdown based text
    with gr.Row():
        # markdown component to display the header
        gr.Markdown("""
            # Thesis Demo - AI Chat Application with GODEL
            Interpretability powered by SHAP and attention visualization.
            ### Switch between the tabs below for the different views.
            """)
    # ChatBot tab used to chat with the AI chatbot
    with gr.Tab("AI ChatBot"):
        with gr.Row():
            # markdown component to display the header of the current tab
            gr.Markdown("""
                ### ChatBot Demo
                Chat with the AI ChatBot using the textbox below.
                Adjust the settings in the accordion above the chat,
                including the selection of the model,
                the system prompt and the XAI method.

                **See Explanations in the accordion above the chat.**

                """)
        # row with columns for the different settings
        with gr.Row(equal_height=True):
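            # collapsible accordion grouping all application settings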
            with gr.Accordion("Application Settings", open=False):
                # column that takes up half of the row (scale 2)
                with gr.Column(scale=2):
                    # textbox to enter the system prompt
                    system_prompt = gr.Textbox(
                        label="System Prompt",
                        info="Set the model's system prompt, dictating how it answers.",
                        # default system prompt is set to this in the backend
                        placeholder=(
                            "You are a helpful, respectful and honest assistant. Always"
                            " answer as helpfully as possible, while being safe."
                        ),
                    )
                # column that takes up 1/4 of the row
                with gr.Column(scale=1):
                    # radio group to select the xai method
                    xai_selection = gr.Radio(
                        ["None", "SHAP", "Attention"],
                        label="Interpretability Settings",
                        info=(
                            "Select an Interpretability Approach Implementation to use."
                        ),
                        value="None",
                        interactive=True,
                        show_label=True,
                    )
                # column that takes up 1/4 of the row
                with gr.Column(scale=1):
                    # radio group to select the model
                    model_selection = gr.Radio(
                        ["GODEL", "Mistral"],
                        label="Model Settings",
                        info="Select a Model to use.",
                        value="Mistral",
                        interactive=True,
                        show_label=True,
                    )

                # calling info functions on change events for the different settings
                system_prompt.change(system_prompt_info, [system_prompt])
                xai_selection.change(xai_info, [xai_selection])
                model_selection.change(model_info, [model_selection])

        # row with chatbot ui displaying "conversation" with the model
        with gr.Row(equal_height=True):
            # group to display components closely together
            with gr.Group(elem_classes="border: 1px solid black;"):
                # accordion to display the normalized input explanation
                with gr.Accordion(label="Input Explanation", open=False):
                    gr.Markdown("""
                    The explanations are based on 10 buckets: buckets 1 to 5 cover
                    negative attribution values, buckets 6 to 10 cover positive ones.
                    **The legend shows the color for each bucket.**

                    *HINT*: This works best in light mode.
                    """)
                    xai_text = gr.HighlightedText(
                        color_map=coloring,
                        label="Input Explanation",
                        show_legend=True,
                        show_label=False,
                    )
                # out of the box chatbot component with avatar images
                # see documentation: https://www.gradio.app/docs/chatbot
                chatbot = gr.Chatbot(
                    layout="panel",
                    show_copy_button=True,
                    avatar_images=("./public/human.jpg", "./public/bot.jpg"),
                )
                # extendable components for extra knowledge
                with gr.Accordion(label="Additional Knowledge", open=False):
                    gr.Markdown("""
                        *Hint:* Add extra knowledge to see GODEL work at its best.
                        Knowledge is ignored when using Mistral.
                        """)
                    # textbox to enter the knowledge
                    knowledge_input = gr.Textbox(
                        value="",
                        label="Knowledge",
                        max_lines=5,
                        info="Add additional context knowledge.",
                        show_label=True,
                    )
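                # textbox to enter the user message for the chatbot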
                user_prompt = gr.Textbox(
                    label="Input Message",
                    max_lines=5,
                    info="""
                    Ask the ChatBot a question.
                    """,
                    show_label=True,
                )
        # row with columns for buttons to submit and clear content
        with gr.Row(elem_classes=""):
            with gr.Column():
                # out of the box clear button which clears the given components
                # see: https://www.gradio.app/docs/clearbutton
                clear_btn = gr.ClearButton([user_prompt, chatbot])
            with gr.Column():
                # submit button that calls the backend functions on click
                submit_btn = gr.Button("Submit", variant="primary")
        # row with content examples that get autofilled on click
        with gr.Row(elem_classes="examples"):
            with gr.Accordion("Mistral Model Examples", open=False):
                # examples util component
                # see: https://www.gradio.app/docs/examples
                gr.Examples(
                    label="Example Questions",
                    examples=[
                        ["Does money buy happiness?", "None", "", "Mistral", ""],
                        ["Does money buy happiness?", "SHAP", "", "Mistral", ""],
                        ["Does money buy happiness?", "Attention", "", "Mistral", ""],
                        [
                            "Does money buy happiness?",
                            "None",
                            (
                                "Respond from the perspective of a billionaire heir"
                                " living his best life with his father's money."
                            ),
                            "Mistral",
                            "",
                        ],
                        [
                            "Does money buy happiness?",
                            "SHAP",
                            (
                                "Respond from the perspective of a billionaire heir"
                                " living his best life with his father's money."
                            ),
                            "Mistral",
                            "",
                        ],
                        [
                            "Does money buy happiness?",
                            "Attention",
                            (
                                "Respond from the perspective of a billionaire heir"
                                " living his best life with his father's money."
                            ),
                            "Mistral",
                            "",
                        ],
                    ],
                    inputs=[
                        user_prompt,
                        xai_selection,
                        system_prompt,
                        model_selection,
                        knowledge_input,
                    ],
                )
            with gr.Accordion("GODEL Model Examples", open=False):
                # examples util component
                # see: https://www.gradio.app/docs/examples
                gr.Examples(
                    label="Example Questions",
                    examples=[
                        [
                            "Does money buy happiness?",
                            "SHAP",
                            (
                                "Some studies have found a correlation between income"
                                " and happiness, but this relationship often has"
                                " diminishing returns. From a psychological standpoint,"
                                " it's not just having money, but how it is used that"
                                " influences happiness."
                            ),
                            "",
                            "GODEL",
                        ],
                        [
                            "Does money buy happiness?",
                            "Attention",
                            (
                                "Some studies have found a correlation between income"
                                " and happiness, but this relationship often has"
                                " diminishing returns. From a psychological standpoint,"
                                " it's not just having money, but how it is used that"
                                " influences happiness."
                            ),
                            "",
                            "GODEL",
                        ],
                        [
                            "Does money buy happiness?",
                            "Attention",
                            "",
                            "",
                            "GODEL",
                        ],
                    ],
                    inputs=[
                        user_prompt,
                        xai_selection,
                        knowledge_input,
                        system_prompt,
                        model_selection,
                    ],
                )

    # explanations tab used to provide explanations for a specific conversation
    with gr.Tab("Explanations"):
        # row with markdown component to display the header of the current tab
        with gr.Row():
            gr.Markdown("""
                ### Get Explanations for Conversations
                Get additional explanations for the last conversation you had with the AI ChatBot.
                Depending on the selected XAI method, different explanations are available.
                """)
        # row that displays the generated explanation of the model (if applicable)
        with gr.Row():
            # iframe that wraps the generated explanation html for display
            xai_interactive = iFrame(
                label="Interactive Explanation",
                value=(
                    '<div style="text-align: center; font-family:arial;"><h4>No Graphic'
                    " to Display (Yet)</h4></div>"
                ),
                show_label=True,
                height="400px",
            )
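        # row with a collapsible attribution plot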
        with gr.Row():
            with gr.Accordion("Explanation Plot", open=False):
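                # plot component showing attribution values for the input sequence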
                xai_plot = gr.Plot(
                    label="Input Sequence Attribution Plot", show_label=True
                )

    # functions to trigger the controller
    ## takes information for the chat and the xai selection
    ## returns prompt, history and xai data
    ## see backend/controller.py for more information
    submit_btn.click(
        interference,
        [
            user_prompt,
            chatbot,
            knowledge_input,
            system_prompt,
            xai_selection,
            model_selection,
        ],
        [user_prompt, chatbot, xai_interactive, xai_text, xai_plot],
    )
    # function triggered by the enter key
    user_prompt.submit(
        interference,
        [
            user_prompt,
            chatbot,
            knowledge_input,
            system_prompt,
            xai_selection,
            model_selection,
        ],
        [user_prompt, chatbot, xai_interactive, xai_text, xai_plot],
    )

    # final tab to show legal information
    ## - credits, data protection and link to the License
    with gr.Tab(label="About"):
        # load about.md markdown
        gr.Markdown(value=load_md("public/about.md"))
        with gr.Accordion(label="Credits, Data Protection, License"):
            # load credits and data protection markdown
            gr.Markdown(value=load_md("public/credits_dataprotection_license.md"))

# mount the gradio ui onto the FastAPI application
app = gr.mount_gradio_app(app, ui, path="/")

# launch block to start the application when run directly
if __name__ == "__main__":

    # use standard gradio launch option for hgf spaces
    if os.environ.get("HOSTING", "").lower() == "spaces":
        # set password to deny public access
        ui.launch(auth=(os.environ["USER"], os.environ["PW"]))
    else:
        # otherwise run the application on port 8080 in reload mode
        ## for local development, uses Docker for Prod deployment
        run("main:app", port=8080, reload=True)