import numpy as np
import gradio as gr
import plotly.express as px
from sklearn.cluster import SpectralCoclustering
from sklearn.datasets import make_biclusters
from sklearn.metrics import consensus_score

# Most recent consensus score; seeded with 0.0 so "Get Score" works before any fit.
score = [0.0]


def dataset(n_clusters=5, noise=5, n_rows=300, n_cols=300):
    """Generate a biclustered matrix and plot it as-is."""
    data, rows, columns = make_biclusters(
        shape=(n_rows, n_cols),
        n_clusters=n_clusters,
        noise=noise,
        shuffle=False,
        random_state=0,
    )
    return px.imshow(data, title="Original Data")


def shuffle_dataset(n_clusters=5, noise=5, n_rows=300, n_cols=300):
    """Generate the same matrix, then randomly permute its rows and columns."""
    data, rows, columns = make_biclusters(
        shape=(n_rows, n_cols),
        n_clusters=n_clusters,
        noise=noise,
        shuffle=False,
        random_state=0,
    )
    rng = np.random.RandomState(0)
    row_idx = rng.permutation(data.shape[0])
    col_idx = rng.permutation(data.shape[1])
    data = data[row_idx][:, col_idx]
    return px.imshow(data, title="Shuffled Data")


def model_fit(n_cluster, noise, n_rows, n_cols, n_clusters, svd_method):
    """Generate, shuffle, and co-cluster the data; return all three plots.

    `n_cluster` is the number of clusters planted in the data; `n_clusters`
    is the number the model is asked to find.
    """
    data, rows, columns = make_biclusters(
        shape=(n_rows, n_cols),
        n_clusters=n_cluster,
        noise=noise,
        shuffle=False,
        random_state=0,
    )
    fig_original = px.imshow(data, title="Original Data")

    rng = np.random.RandomState(0)
    row_idx = rng.permutation(data.shape[0])
    col_idx = rng.permutation(data.shape[1])
    data = data[row_idx][:, col_idx]
    fig_shuffled = px.imshow(data, title="Shuffled Data")

    model = SpectralCoclustering(
        n_clusters=n_clusters, random_state=0, svd_method=svd_method
    )
    model.fit(data)
    # Compare the fitted biclusters with the ground truth, permuted the same way.
    score.append(
        consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx]))
    )

    # Reorder rows and columns by their fitted labels so each bicluster forms
    # a contiguous block.
    fit_data = data[np.argsort(model.row_labels_)]
    fit_data = fit_data[:, np.argsort(model.column_labels_)]
    fig = px.imshow(fit_data, title="After Co-Clustering")
    return fig_original, fig_shuffled, fig


def get_score():
    return f"{score[-1]:.3f}"
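
# Hedged sketch (not wired into the UI below): a minimal, self-contained check of
# what consensus_score reports. With no noise and the right number of clusters,
# SpectralCoclustering should recover the planted biclusters and score 1.0.
# The name `_consensus_score_sketch` is illustrative only, not part of the app.
def _consensus_score_sketch():
    toy, toy_rows, toy_cols = make_biclusters(
        shape=(30, 30), n_clusters=2, noise=0, shuffle=False, random_state=0
    )
    toy_model = SpectralCoclustering(n_clusters=2, random_state=0).fit(toy)
    # 1.0 means the fitted biclusters match the ground truth exactly;
    # values near 0 mean essentially no agreement.
    return consensus_score(toy_model.biclusters_, (toy_rows, toy_cols))
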
with gr.Blocks() as demo:
    gr.Markdown("## Spectral Co-Clustering")
    gr.Markdown(
        "This demo is based on the [Spectral Co-Clustering]"
        "(https://scikit-learn.org/stable/auto_examples/bicluster/plot_spectral_coclustering.html)"
        " example from scikit-learn. The goal of co-clustering is to find subgroups of rows"
        " and columns that are highly correlated. The data is first shuffled, then the rows"
        " and columns are reordered to match the biclusters. The consensus score measures how"
        " well the biclusters found by the model match the true biclusters; it ranges from 0"
        " to 1, with 1 being a perfect match."
    )

    with gr.Tab("Data"):
        gr.Markdown("## Play with the parameters to see how the data changes")
        gr.Markdown("### Parameters")
        with gr.Row():
            n_rows = gr.Slider(1, 500, label="Number of Rows", value=300, step=1)
            n_cols = gr.Slider(1, 500, label="Number of Columns", value=300, step=1)
            n_cluster = gr.Slider(1, 50, label="Number of Clusters", value=5, step=1)
            noise = gr.Slider(0, 10, label="Noise", value=5, step=1)
        with gr.Row():
            gen_btn = gr.Button("Generate Data")
            shu_btn = gr.Button("Shuffle Data")
        with gr.Row():
            gen_btn.click(
                fn=dataset,
                inputs=[n_cluster, noise, n_rows, n_cols],
                outputs=gr.Plot(),
            )
            shu_btn.click(
                fn=shuffle_dataset,
                inputs=[n_cluster, noise, n_rows, n_cols],
                outputs=gr.Plot(),
            )

    with gr.Tab("Model"):
        gr.Markdown("## Model")
        gr.Markdown("### Data Parameters")
        with gr.Row():
            n_rows = gr.Slider(1, 500, label="Number of Rows", value=300, step=1)
            n_cols = gr.Slider(1, 500, label="Number of Columns", value=300, step=1)
            n_cluster = gr.Slider(1, 50, label="Number of Clusters", value=5, step=1)
            noise = gr.Slider(0, 10, label="Noise", value=5, step=1)
        gr.Markdown("### Model Parameters")
        with gr.Row():
            n_clusters = gr.Slider(1, 50, label="Number of Clusters", value=5, step=1)
            svd_method = gr.Dropdown(
                ["randomized", "arpack"], label="SVD Method", value="randomized"
            )
        model_btn = gr.Button("Fit Model")
        with gr.Row():
            model_btn.click(
                fn=model_fit,
                inputs=[n_cluster, noise, n_rows, n_cols, n_clusters, svd_method],
                outputs=[gr.Plot(), gr.Plot(), gr.Plot()],
            )
        gr.Markdown("### Consensus Score")
        score_btn = gr.Button("Get Score")
        with gr.Row():
            score_btn.click(fn=get_score, outputs=gr.Text())

demo.launch()
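
# Usage note (hedged): running this file directly starts a local Gradio server
# and prints its URL. Passing share=True to demo.launch() would additionally
# create a temporary public link; the plain launch() above keeps everything local.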