from __future__ import annotations

import numpy as np
import gradio as gr
from sklearn.svm import SVC
import plotly.graph_objects as go
from sklearn.datasets import load_digits
from sklearn.model_selection import validation_curve

def plot_validation_curve(x: np.ndarray, ys: list[np.ndarray], yerrors: list[np.ndarray], names: list[str], colors: list[str], log_x: bool = True, title: str = ""):
    """Plot one curve per score series, each with a shaded +/- error band."""
    fig = go.Figure()

    for y, yerror, name, color in zip(ys, yerrors, names, colors):
        y_upper = y + yerror
        y_lower = y - yerror
        
        fig.add_trace(
            go.Scatter(
                x=x,
                y=np.round(y, 3),
                name=name,
                line_color=color
            )
        )

        fig.add_trace(
            go.Scatter(
                x=x.tolist()+x[::-1].tolist(), # x, then x reversed
                y=y_upper.tolist()+y_lower[::-1].tolist(), # upper, then lower reversed
                fill='toself',
                fillcolor=color,
                line=dict(color=color),
                hoverinfo="skip",
                showlegend=False,
                opacity=0.2
            )
        )

    if log_x:
        fig.update_xaxes(type="log")

    fig.update_layout(
        title=title, 
        xaxis_title="Hyperparameter", 
        yaxis_title="Accuracy",
        hovermode="x unified",
    )

    return fig

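# A minimal, illustrative sketch of calling plot_validation_curve on its own
# (synthetic numbers, not produced by the app):
#
#     xs = np.logspace(-6, -1, 10)
#     mean = np.linspace(0.6, 0.95, 10)
#     std = np.full(10, 0.02)
#     plot_validation_curve(xs, [mean], [std], ["Training score"], ["orange"]).show()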


def app_fn(n_points: int, param_name: str):
    """Compute a validation curve for an SVC over the chosen hyperparameter and plot it."""
    X, y = load_digits(return_X_y=True)
    subset_mask = np.isin(y, [1, 2])  # binary classification: 1 vs 2
    X, y = X[subset_mask], y[subset_mask]

    if param_name == "gamma":
        param_range = np.logspace(-6, -1, n_points)
        log_x = True
    elif param_name == "C":
        param_range = np.logspace(-2, 0, n_points)
        log_x = True
    elif param_name == "kernel":
        # kernel is categorical, so n_points is ignored here
        param_range = np.array(["rbf", "linear", "poly", "sigmoid"])
        log_x = False
    else:
        raise ValueError(f"Unsupported hyperparameter: {param_name}")

    train_scores, test_scores = validation_curve(
        SVC(),
        X,
        y,
        param_name=param_name,
        param_range=param_range,
        scoring="accuracy",
        n_jobs=-1,
    )

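    # validation_curve uses 5-fold cross-validation by default, so each row of
    # train_scores/test_scores holds one score per CV fold; average over axis=1.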
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)

    fig = plot_validation_curve(
        param_range, 
        [train_scores_mean, test_scores_mean], 
        [train_scores_std, test_scores_std], 
        ["Training score", "Cross-validation score"], 
        ["orange", "navy"], 
        title=f"Validation Curve with SVM for {param_name} Hyperparameter",
        log_x=log_x
    )

    return fig
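# When called outside Gradio, app_fn simply returns a Plotly figure; an
# illustrative direct check could be: app_fn(10, "gamma").show()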

title = "Plotting Validation Curve"
with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(
        """
        #### This example shows how a validation curve can be used to understand \
        how the performance of a model, an SVM in this case, changes as a hyperparameter is varied. \
        The dataset used is the [digits dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) \
        from scikit-learn. The hyperparameter to vary can be selected from the dropdown below.

        [Original Example](https://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html#sphx-glr-auto-examples-model-selection-plot-validation-curve-py)
        """
    )
    with gr.Row():
        n_points = gr.Slider(minimum=5, maximum=100, value=5, step=5, label="Number of points")
        param_name = gr.Dropdown(["gamma", "C", "kernel"], value="gamma", label="Hyperparameter")


    fig = gr.Plot(label="Validation Curve")

    n_points.release(fn=app_fn, inputs=[n_points, param_name], outputs=[fig])
    param_name.change(fn=app_fn, inputs=[n_points, param_name], outputs=[fig])


    demo.load(fn=app_fn, inputs=[n_points, param_name], outputs=[fig])

demo.launch()
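# Assuming Gradio >= 3: to expose the app beyond localhost, one could instead
# use demo.launch(share=True) or demo.launch(server_name="0.0.0.0").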