from __future__ import annotations

import numpy as np
import gradio as gr
from sklearn.svm import SVC
import plotly.graph_objects as go
from sklearn.datasets import load_digits
from sklearn.model_selection import validation_curve

def plot_validation_curve(
    x: np.ndarray,
    ys: list[np.ndarray],
    yerrors: list[np.ndarray],
    names: list[str],
    colors: list[str],
    log_x: bool = True,
    title: str = "",
):
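    """Plot one or more score curves with shaded ±1-std error bands.

    Each entry of ``ys``/``yerrors`` is a 1-D array aligned with ``x``; the band is
    drawn as a filled polygon running from ``y - yerror`` up to ``y + yerror``.
    """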
    fig = go.Figure()

    for y, yerror, name, color in zip(ys, yerrors, names, colors):
        y_upper = y + yerror
        y_lower = y - yerror
        
        fig.add_trace(
            go.Scatter(
                x=x,
                y=y,
                name=name,
                line_color=color
            )
        )

        fig.add_trace(
            go.Scatter(
                x=x.tolist()+x[::-1].tolist(), # x, then x reversed
                y=y_upper.tolist()+y_lower[::-1].tolist(), # upper, then lower reversed
                fill='toself',
                fillcolor=color,
                line=dict(color=color),
                hoverinfo="skip",
                showlegend=False,
                opacity=0.2
            )
        )

    if log_x:
        fig.update_xaxes(type="log")

    fig.update_layout(title=title, xaxis_title="gamma", yaxis_title="Accuracy")

    return fig



def app_fn(n_points: int):
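    """Plot a validation curve for an SVC on the digits 1-vs-2 subset over ``n_points`` gamma values."""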
    X, y = load_digits(return_X_y=True)
    subset_mask = np.isin(y, [1, 2])  # binary classification: 1 vs 2
    X, y = X[subset_mask], y[subset_mask]

    param_range = np.logspace(-6, -1, n_points)
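    # validation_curve re-fits the estimator for every gamma value using (by default)
    # stratified 5-fold cross-validation; both returned arrays have shape (n_points, n_folds).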
    train_scores, test_scores = validation_curve(
        SVC(),
        X,
        y,
        param_name="gamma",
        param_range=param_range,
        scoring="accuracy",
        n_jobs=-1,
    )

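    # Aggregate the per-fold scores: mean and standard deviation across the CV folds
    # for each gamma value, used below as the curve and its shaded error band.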
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    fig = plot_validation_curve(
        param_range, 
        [train_scores_mean, test_scores_mean], 
        [train_scores_std, test_scores_std], 
        ["Training score", "Cross-validation score"], 
        ["orange", "navy"], 
        title="Validation Curve with SVM for Gamma Hyperparameter"
    )

    return fig

title = "Plotting Validation Curve"
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(
        """
        #### This example uses a validation curve to show how the performance of a model, \
        an SVC in this case, changes as one of its hyperparameters (gamma) is varied. \
        The data comes from the [digits dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) \
        in scikit-learn, restricted to the digits 1 and 2.

        [Original Example](https://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html#sphx-glr-auto-examples-model-selection-plot-validation-curve-py)
        """
    )

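    # The slider sets how many gamma values are sampled on the log grid from 1e-6 to 1e-1.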
    n_points = gr.Slider(minimum=5, maximum=100, value=5, step=5, label="Number of points")
    btn = gr.Button("Run")
    fig = gr.Plot(label="Validation Curve")

    btn.click(fn=app_fn, inputs=[n_points], outputs=[fig])
    demo.load(fn=app_fn, inputs=[n_points], outputs=[fig])

demo.launch()
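
# launch() serves the app locally (typically at http://127.0.0.1:7860); passing
# share=True to demo.launch() additionally creates a temporary public link.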