import gradio as gr

from io_utils import (
    read_inference_type,
    read_scanners,
    write_inference_type,
    write_scanners,
)
from text_classification_ui_helpers import (
    check_dataset_and_get_config,
    check_dataset_and_get_split,
    check_model_and_show_prediction,
    try_submit,
    write_column_mapping_to_config,
)
from wordings import CONFIRM_MAPPING_DETAILS_MD, INTRODUCTION_MD

# Upper bounds on how many label/feature mapping dropdowns are pre-created below;
# a fixed pool of hidden dropdowns is built up front and toggled by the helper callbacks.
MAX_LABELS = 20
MAX_FEATURES = 20

EXAMPLE_MODEL_ID = "cardiffnlp/twitter-roberta-base-sentiment-latest"
EXAMPLE_DATA_ID = "tweet_eval"
CONFIG_PATH = "./config.yaml"
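# config.yaml supplies the initial scanner selection and inference type; the write_*
# helpers wired up inside get_demo() write the user's changes back to it.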

def get_demo():
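    """Build the Giskard text-classification evaluation UI.

    Note: no gr.Blocks() is created here, so this function is expected to be
    called from inside an active Blocks context by the top-level app.
    """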
    with gr.Row():
        gr.Markdown(INTRODUCTION_MD)
    with gr.Row():
        model_id_input = gr.Textbox(
            label="Hugging Face model id",
            placeholder=EXAMPLE_MODEL_ID + " (press enter to confirm)",
        )

        dataset_id_input = gr.Textbox(
            label="Hugging Face Dataset id",
            placeholder=EXAMPLE_DATA_ID + " (press enter to confirm)",
        )
    
    with gr.Row():
        # Hidden until the dataset id has been checked (see the blur/change handlers below).
        dataset_config_input = gr.Dropdown(label="Dataset Config", visible=False)
        dataset_split_input = gr.Dropdown(label="Dataset Split", visible=False)
    
    # Example input and prediction preview; both are outputs of
    # check_model_and_show_prediction, which is wired up below.
    with gr.Row():
        example_input = gr.Markdown("Example Input", visible=False)
    with gr.Row():
        example_prediction = gr.Label(label="Model Prediction Sample", visible=False)
    
    with gr.Row():
        with gr.Accordion(
            label="Label and Feature Mapping", visible=False, open=False
        ) as column_mapping_accordion:
            with gr.Row():
                gr.Markdown(CONFIRM_MAPPING_DETAILS_MD)
            column_mappings = []
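            # One hidden dropdown is pre-created per potential label (first MAX_LABELS
            # entries) and per potential feature (next MAX_FEATURES entries);
            # check_model_and_show_prediction receives them as outputs and controls what they show.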
            with gr.Row():
                with gr.Column():
                    for _ in range(MAX_LABELS):
                        column_mappings.append(gr.Dropdown(visible=False))
                with gr.Column():
                    for _ in range(MAX_FEATURES):
                        column_mappings.append(gr.Dropdown(visible=False))
    
    with gr.Accordion(label="Model Wrap Advanced Config (optional)", open=False):
        run_local = gr.Checkbox(value=True, label="Run in this Space")
        # Pre-check the Inference API box if the saved config already asks for it.
        use_inference = read_inference_type(CONFIG_PATH) == "hf_inference_api"
        run_inference = gr.Checkbox(value=use_inference, label="Run with Inference API")
    
    with gr.Accordion(label="Scanner Advanced Config (optional)", open=False):
        selected = read_scanners(CONFIG_PATH)
        # Offer "data_leakage" as an extra choice on top of the scanners saved in the config.
        scan_config = selected + ["data_leakage"]
        scanners = gr.CheckboxGroup(
            choices=scan_config, value=selected, label="Scan Settings", visible=True
        )

    with gr.Row():
        run_btn = gr.Button(
            "Get Evaluation Result",
            variant="primary",
            interactive=True,
            size="lg",
        )
    
    with gr.Row():
        logs = gr.Textbox(label="Giskard Bot Evaluation Log:", visible=False)
        
    # Persist any column-mapping change straight back to the config file.
    gr.on(
        triggers=[label.change for label in column_mappings],
        fn=write_column_mapping_to_config,
        inputs=[dataset_id_input, dataset_config_input, dataset_split_input, *column_mappings],
    )

    # Refresh the prediction preview and column-mapping widgets whenever the model,
    # dataset config, or split selection changes.
    gr.on(
        triggers=[model_id_input.change, dataset_config_input.change, dataset_split_input.change],
        fn=check_model_and_show_prediction,
        inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input],
        outputs=[example_input, example_prediction, column_mapping_accordion, *column_mappings],
    )

    # Look up the dataset's available configs once the dataset id field loses focus.
    dataset_id_input.blur(
        check_dataset_and_get_config, inputs=dataset_id_input, outputs=dataset_config_input
    )

    # Once a config is chosen, list the splits it provides.
    dataset_config_input.change(
        check_dataset_and_get_split,
        inputs=[dataset_id_input, dataset_config_input],
        outputs=[dataset_split_input],
    )

    # Persist the scanner selection whenever the checkboxes change.
    scanners.change(
        write_scanners,
        inputs=scanners,
    )

    # Persist whether to run with the Hugging Face Inference API.
    run_inference.change(
        write_inference_type,
        inputs=[run_inference],
    )

    # Kick off the evaluation; try_submit also updates the button state and the log box.
    gr.on(
        triggers=[run_btn.click],
        fn=try_submit,
        inputs=[model_id_input, dataset_id_input, dataset_config_input, dataset_split_input, run_local],
        outputs=[run_btn, logs],
    )