File size: 17,389 Bytes
c77435d
 
8a142a6
c77435d
8a142a6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c77435d
8a142a6
 
 
c77435d
8a142a6
 
 
c77435d
8a142a6
 
c77435d
8a142a6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c77435d
8a142a6
 
 
 
c77435d
8a142a6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c77435d
8a142a6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c77435d
 
8a142a6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c77435d
 
8a142a6
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
import gradio as gr
import random
import pandas as pd
import os
from utils.data_loader import get_random_example
from utils.models import generate_summaries, model_names
from utils.ui_helpers import toggle_context_display, update_feedback, get_context_html
from utils.leaderboard import load_leaderboard_data, save_leaderboard_data

# Read CSS from file.
# NOTE(review): the path is resolved against the current working directory,
# so the app must be launched from the project root where static/styles.css
# lives — confirm this matches the deployment entry point.
css_path = os.path.join(os.getcwd(), 'static', 'styles.css')
# Explicit encoding: the stylesheet may contain non-ASCII characters and the
# platform default encoding is not guaranteed to be UTF-8.
with open(css_path, 'r', encoding='utf-8') as f:
    css_content = f.read()

# Feedback checkbox choices offered after a vote, keyed by vote outcome:
# 'left' = Model A preferred, 'right' = Model B preferred, 'tie', 'neither'.
feedback_options = {
    "left": ["Model A: More complete", "Model A: More accurate", "Model A: More relevant", "Model A: Better written", "Model A: Better refusal (if applicable)"],
    "right": ["Model B: More complete", "Model B: More accurate", "Model B: More relevant", "Model B: Better written", "Model B: Better refusal (if applicable)"],
    "tie": ["Both complete", "Both accurate", "Both well written", "Both handle refusal well (if applicable)"],
    "neither": ["Both incomplete", "Both hallucinate", "Both irrelevant", "Both incorrectly refuse (if applicable)", "A is bad", "B is bad"]
}

def load_new_question_improved(agg_results=None, show_full=False):
    """Load a new random question, its contexts, and two model summaries.

    Picks two distinct models at random, generates their summaries, and
    returns the full set of state values and ``gr.update`` objects expected
    by the output lists wired to ``demo.load``, ``random_question_btn.click``
    and ``try_another_btn.click``.

    Args:
        agg_results: Existing aggregate results dict; loaded from disk when
            ``None``.
        show_full: Whether the context panel starts expanded. Previously this
            parameter was unconditionally overwritten to ``False`` and thus
            dead; it is now honored. The default preserves the old behavior
            for all existing callers (which pass no arguments).

    Returns:
        A 26-element list matching the Blocks output wiring below.
    """
    if agg_results is None:
        agg_results = load_leaderboard_data()

    example = get_random_example()
    # Two distinct models; their identities stay hidden until after the vote.
    m_a_name, m_b_name = random.sample(model_names, 2)
    s_a, s_b = generate_summaries(example, m_a_name, m_b_name)

    context_desc = example.get('processed_context_desc', '')
    if context_desc:
        context_desc = f"<div class='context-topic'><span class='topic-label'>The question and context are about:</span> {context_desc}</div>"

    context_html = get_context_html(example, show_full=show_full)
    # Keep the toggle-button label consistent with the initial state.
    # NOTE(review): "Hide Full Context" mirrors the presumed label used by
    # toggle_context_display in utils.ui_helpers — confirm against that helper.
    toggle_label = "Hide Full Context" if show_full else "Show Full Context"

    return [
        example,  # current_example
        m_a_name,  # model_a_name
        m_b_name,  # model_b_name
        s_a,  # summary_a_text
        s_b,  # summary_b_text
        None,  # selected_winner
        [],  # feedback_list
        False,  # show_results_state
        agg_results,  # results_agg
        show_full,  # show_full_context
        gr.update(value=example['question']),  # query_display
        gr.update(value=context_desc, visible=bool(context_desc)),  # context_description
        gr.update(value=context_html),  # context_display
        gr.update(value=toggle_label, elem_classes=["context-toggle-button"]),  # context_toggle_btn
        gr.update(value=s_a),  # summary_a_display
        gr.update(value=s_b),  # summary_b_display
        gr.update(interactive=True, elem_classes=["vote-button"]),  # vote_button_a
        gr.update(interactive=True, elem_classes=["vote-button"]),  # vote_button_b
        gr.update(interactive=True, elem_classes=["vote-button"]),  # vote_button_tie
        gr.update(interactive=True, elem_classes=["vote-button", "vote-button-neither"]),  # vote_button_neither
        gr.update(choices=[], value=[], interactive=False, visible=False),  # feedback_checkboxes
        gr.update(visible=False),  # feedback_section
        gr.update(interactive=False, visible=True),  # submit_button
        gr.update(visible=False),  # results_reveal_area
        gr.update(interactive=True),  # random_question_btn
        gr.update(elem_classes=[])  # main_interface_area
    ]

def select_vote_improved(winner_choice):
    """Handle a vote-button click.

    Records the chosen winner ('left', 'right', 'tie' or 'neither'), reveals
    the matching optional-feedback checkboxes, enables the submit button, and
    highlights the clicked button via its CSS classes.
    """
    feedback_choices = feedback_options.get(winner_choice, [])

    # Base CSS classes per button; the chosen one gets "selected" appended.
    classes_by_choice = {
        'left': ["vote-button"],
        'right': ["vote-button"],
        'tie': ["vote-button"],
        'neither': ["vote-button", "vote-button-neither"],
    }
    if winner_choice in classes_by_choice:
        classes_by_choice[winner_choice] = classes_by_choice[winner_choice] + ["selected"]

    return [
        winner_choice,  # selected_winner
        gr.update(choices=feedback_choices, value=[], interactive=True, visible=True),  # feedback_checkboxes
        gr.update(visible=True),  # feedback_section
        gr.update(interactive=True),  # submit_button
        gr.update(elem_classes=classes_by_choice['left']),  # vote_button_a
        gr.update(elem_classes=classes_by_choice['right']),  # vote_button_b
        gr.update(elem_classes=classes_by_choice['tie']),  # vote_button_tie
        gr.update(elem_classes=classes_by_choice['neither'])  # vote_button_neither
    ]

def submit_vote_fixed(m_a, m_b, winner, feedback, current_results):
    """Process a vote submission: update and persist aggregate results, then
    reveal model identities and the leaderboard table.

    Args:
        m_a, m_b: Real model names behind "Model A" / "Model B".
        winner: 'left', 'right', 'tie' or 'neither'; ``None`` if nothing was
            selected.
        feedback: List of selected feedback strings (kept in state; not
            persisted by this function).
        current_results: Aggregate dict of the form
            ``{"wins": {...}, "losses": {...}, "ties": {...}, "votes": N}``.

    Returns:
        A 16-element list matching the submit_button outputs wiring, or ``{}``
        (Gradio's "no updates" value) when no winner was selected.
    """
    if winner is None:
        print("Warning: Submit called without a winner selected.")
        return {}

    # Copy one level deep: a plain .copy() would share the nested
    # wins/losses/ties dicts, so the increments below would leak back into
    # the caller-held gr.State value before save succeeded.
    updated_results = {
        k: (dict(v) if isinstance(v, dict) else v)
        for k, v in current_results.items()
    }
    for model in (m_a, m_b):
        if model not in updated_results["wins"]:
            updated_results["wins"][model] = 0
            updated_results["losses"][model] = 0
            updated_results["ties"][model] = 0

    if winner == 'left':
        updated_results["wins"][m_a] = updated_results["wins"].get(m_a, 0) + 1
        updated_results["losses"][m_b] = updated_results["losses"].get(m_b, 0) + 1
    elif winner == 'right':
        updated_results["wins"][m_b] = updated_results["wins"].get(m_b, 0) + 1
        updated_results["losses"][m_a] = updated_results["losses"].get(m_a, 0) + 1
    elif winner == 'tie':
        updated_results["ties"][m_a] = updated_results["ties"].get(m_a, 0) + 1
        updated_results["ties"][m_b] = updated_results["ties"].get(m_b, 0) + 1
    # 'neither' counts only toward the total vote count below.

    updated_results["votes"] = updated_results.get("votes", 0) + 1
    save_leaderboard_data(updated_results)

    results_df = _build_results_dataframe(updated_results)

    return [
        True,  # show_results_state
        updated_results,  # results_agg
        gr.update(interactive=False),  # vote_button_a
        gr.update(interactive=False),  # vote_button_b
        gr.update(interactive=False),  # vote_button_tie
        gr.update(interactive=False),  # vote_button_neither
        gr.update(interactive=False),  # feedback_checkboxes
        gr.update(visible=True),  # feedback_section
        gr.update(visible=False),  # submit_button
        gr.update(visible=True),  # results_reveal_area
        gr.update(interactive=False),  # random_question_btn
        gr.update(value=results_df, visible=True),  # results_table_display
        gr.update(elem_classes=["results-revealed"]),  # main_interface_area
        gr.update(interactive=True),  # context_toggle_btn
        gr.update(value=m_a),  # model_a_reveal
        gr.update(value=m_b)  # model_b_reveal
    ]

def _build_results_dataframe(results):
    """Build the leaderboard DataFrame, sorted by win rate descending.

    ``results`` is the aggregate dict described in submit_vote_fixed. Win
    rate counts a tie as half a win: (wins + 0.5 * ties) / comparisons.
    """
    rows = []
    all_models = set(results["wins"]) | set(results["losses"]) | set(results["ties"])
    for model in sorted(all_models):
        wins = results["wins"].get(model, 0)
        losses = results["losses"].get(model, 0)
        ties = results["ties"].get(model, 0)
        total_comparisons = wins + losses + ties
        win_rate = (wins + 0.5 * ties) / total_comparisons if total_comparisons > 0 else 0.0
        rows.append({
            "Model": model,
            "Win Rate (%)": f"{win_rate:.1%}",
            "Wins": wins,
            "Losses": losses,
            "Ties": ties,
            "Comparisons": total_comparisons
        })

    results_df = pd.DataFrame(rows)
    if not results_df.empty:
        # Sort on the numeric rate parsed back out of the display string, then
        # drop the scratch column so only presentation columns remain.
        results_df['Win Rate Value'] = results_df['Win Rate (%)'].str.rstrip('%').astype('float') / 100.0
        results_df = results_df.sort_values(by='Win Rate Value', ascending=False).drop(columns=['Win Rate Value'])
    return results_df

# Wrap the loaded stylesheet in a <style> tag so it can be injected into the
# page via gr.HTML inside the Blocks below.
css_html = f"<style>{css_content}</style>"

# Create Gradio interface.
# Two-tab Blocks app: an "Arena" tab for blind A/B comparison of model
# summaries, and a "Leaderboard" tab showing aggregate statistics.
with gr.Blocks(theme=gr.themes.Default(
    primary_hue=gr.themes.colors.orange,
    secondary_hue=gr.themes.colors.slate
)) as demo:
    # Embed CSS directly in HTML
    gr.HTML(css_html)

    # State Variables (per-session, server-side)
    current_example = gr.State({})  # example dict currently on screen
    model_a_name = gr.State("")  # hidden identity of "Model A"
    model_b_name = gr.State("")  # hidden identity of "Model B"
    summary_a_text = gr.State("")
    summary_b_text = gr.State("")
    selected_winner = gr.State(None)  # 'left' | 'right' | 'tie' | 'neither'
    feedback_list = gr.State([])
    show_results_state = gr.State(False)
    results_agg = gr.State({"wins": {}, "losses": {}, "ties": {}, "votes": 0})
    show_full_context = gr.State(False)

    # Create Tabs
    with gr.Tabs() as tabs:
        # Main Arena Tab
        with gr.TabItem("Arena", id="arena-tab"):
            # Main title and description
            gr.Markdown("# RAG Summarizer Arena")
            gr.Markdown("Compare summaries generated by different models based on the provided context and query. Select the better summary, or choose 'Tie' or 'Neither'. Your feedback helps evaluate model performance.")

            # Main container
            with gr.Column(elem_id="main-interface-area") as main_interface_area:
                # Query section
                with gr.Row(elem_id="query-title-row"):
                    gr.Markdown("### Query", elem_classes="section-heading")

                with gr.Row(elem_id="query-container"):
                    with gr.Row(elem_classes="query-box-row"):
                        query_display = gr.Markdown(value="Loading question...", elem_classes="query-text")
                    random_question_btn = gr.Button("🔄 Get Random Question", elem_classes="query-button")

                # Context description
                context_description = gr.Markdown("", elem_classes="context-description")

                # Context section
                with gr.Row(elem_id="context-header-row"):
                    gr.Markdown("### Context Provided", elem_classes="context-title")
                    context_toggle_btn = gr.Button("Show Full Context", elem_classes=["context-toggle-button"])

                context_display = gr.HTML(value="Loading context...", label="Context Chunks")

                gr.Markdown("---")
                gr.Markdown("### Compare Summaries", elem_classes="section-heading")

                # Model summaries (anonymized side-by-side)
                with gr.Row():
                    with gr.Column(scale=1):
                        with gr.Group(elem_classes=["summary-card", "summary-card-a"]):
                            summary_a_display = gr.Textbox(label="Model A", lines=10, interactive=False, show_copy_button=True)
                    with gr.Column(scale=1):
                        with gr.Group(elem_classes=["summary-card", "summary-card-b"]):
                            summary_b_display = gr.Textbox(label="Model B", lines=10, interactive=False, show_copy_button=True)

                # Voting section
                gr.Markdown("### Cast Your Vote", elem_classes="section-heading")
                with gr.Row():
                    vote_button_a = gr.Button("⬅️ Summary A is Better", elem_classes=["vote-button"])
                    vote_button_tie = gr.Button("🤝 Tie / Equally Good", elem_classes=["vote-button"])
                    vote_button_b = gr.Button("➡️ Summary B is Better", elem_classes=["vote-button"])
                    vote_button_neither = gr.Button("❌ Neither is Adequate", elem_classes=["vote-button", "vote-button-neither"])

                # Feedback section (revealed after a vote button is clicked)
                with gr.Group(elem_classes=["feedback-section"], visible=False) as feedback_section:
                    feedback_checkboxes = gr.CheckboxGroup(label="Feedback (optional)", choices=[], interactive=False)

                # Submit button
                submit_button = gr.Button("Submit Vote", variant="primary", interactive=False, elem_id="submit-button")

                # Results area (model identities revealed after submission)
                with gr.Column(visible=False) as results_reveal_area:
                    gr.Markdown("---")
                    gr.Markdown("### ✅ Vote Submitted!", elem_classes="section-heading")

                    # Model reveal section
                    with gr.Row():
                        with gr.Column(scale=1):
                            gr.Markdown("### Model A was actually:", elem_classes="section-heading")
                            model_a_reveal = gr.Markdown("", elem_classes="model-reveal model-a-reveal")
                        with gr.Column(scale=1):
                            gr.Markdown("### Model B was actually:", elem_classes="section-heading")
                            model_b_reveal = gr.Markdown("", elem_classes="model-reveal model-b-reveal")

                    gr.HTML("<div style='height: 10px;'></div>")

                    # Try another button
                    with gr.Row(elem_classes=["control-buttons"]):
                        try_another_btn = gr.Button("🔄 Try Another Question", elem_id="try-another-btn")

        # Leaderboard Tab
        with gr.TabItem("Leaderboard", id="leaderboard-tab"):
            gr.Markdown("# Model Performance Leaderboard")
            gr.Markdown("View aggregate performance statistics for all models. The table below shows win rates, wins, losses, and ties for each model based on all evaluations.")
            results_table_display = gr.DataFrame(label="Model Performance", interactive=False, wrap=True)

    # Event Listeners
    context_toggle_btn.click(
        fn=toggle_context_display,
        inputs=[current_example, show_full_context],
        outputs=[show_full_context, context_display, context_toggle_btn]
    )

    # NOTE: demo.load, random_question_btn.click and try_another_btn.click all
    # invoke load_new_question_improved with the same (order-sensitive)
    # 26-element outputs list; keep the three lists in sync.
    demo.load(
        fn=load_new_question_improved,
        inputs=[],
        outputs=[
            current_example, model_a_name, model_b_name, summary_a_text, summary_b_text,
            selected_winner, feedback_list, show_results_state, results_agg, show_full_context,
            query_display, context_description, context_display, context_toggle_btn,
            summary_a_display, summary_b_display,
            vote_button_a, vote_button_b, vote_button_tie, vote_button_neither,
            feedback_checkboxes, feedback_section, submit_button, results_reveal_area, random_question_btn,
            main_interface_area
        ]
    )

    random_question_btn.click(
        fn=load_new_question_improved,
        inputs=[],
        outputs=[
            current_example, model_a_name, model_b_name, summary_a_text, summary_b_text,
            selected_winner, feedback_list, show_results_state, results_agg, show_full_context,
            query_display, context_description, context_display, context_toggle_btn,
            summary_a_display, summary_b_display,
            vote_button_a, vote_button_b, vote_button_tie, vote_button_neither,
            feedback_checkboxes, feedback_section, submit_button, results_reveal_area, random_question_btn,
            main_interface_area
        ]
    )

    # Each vote button records its own winner code via a closure-free lambda.
    vote_button_a.click(
        fn=lambda: select_vote_improved('left'),
        inputs=None,
        outputs=[selected_winner, feedback_checkboxes, feedback_section, submit_button, vote_button_a, vote_button_b, vote_button_tie, vote_button_neither]
    )
    vote_button_b.click(
        fn=lambda: select_vote_improved('right'),
        inputs=None,
        outputs=[selected_winner, feedback_checkboxes, feedback_section, submit_button, vote_button_a, vote_button_b, vote_button_tie, vote_button_neither]
    )
    vote_button_tie.click(
        fn=lambda: select_vote_improved('tie'),
        inputs=None,
        outputs=[selected_winner, feedback_checkboxes, feedback_section, submit_button, vote_button_a, vote_button_b, vote_button_tie, vote_button_neither]
    )
    vote_button_neither.click(
        fn=lambda: select_vote_improved('neither'),
        inputs=None,
        outputs=[selected_winner, feedback_checkboxes, feedback_section, submit_button, vote_button_a, vote_button_b, vote_button_tie, vote_button_neither]
    )

    feedback_checkboxes.change(
        fn=update_feedback,
        inputs=[feedback_checkboxes],
        outputs=[feedback_list]
    )

    submit_button.click(
        fn=submit_vote_fixed,
        inputs=[model_a_name, model_b_name, selected_winner, feedback_list, results_agg],
        outputs=[
            show_results_state, results_agg,
            vote_button_a, vote_button_b, vote_button_tie, vote_button_neither,
            feedback_checkboxes,
            feedback_section,
            submit_button,
            results_reveal_area,
            random_question_btn,
            results_table_display,
            main_interface_area,
            context_toggle_btn,
            model_a_reveal,
            model_b_reveal
        ]
    )

    try_another_btn.click(
        fn=load_new_question_improved,
        inputs=[],
        outputs=[
            current_example, model_a_name, model_b_name, summary_a_text, summary_b_text,
            selected_winner, feedback_list, show_results_state, results_agg, show_full_context,
            query_display, context_description, context_display, context_toggle_btn,
            summary_a_display, summary_b_display,
            vote_button_a, vote_button_b, vote_button_tie, vote_button_neither,
            feedback_checkboxes, feedback_section, submit_button, results_reveal_area, random_question_btn,
            main_interface_area
        ]
    )

# Script entry point: launch the Gradio server. debug=True surfaces handler
# errors in the console while developing.
if __name__ == "__main__":
    demo.launch(debug=True)