jeonghin committed
Commit 7ca3184 (parent: e33c9dd)

Update app.py

Files changed (1)
  1. app.py +94 -133
app.py CHANGED
@@ -1,147 +1,108 @@
-import io
-import random
-from typing import List, Tuple
-
-import aiohttp
-import panel as pn
-from PIL import Image
-from transformers import CLIPModel, CLIPProcessor
-
-pn.extension(design="bootstrap", sizing_mode="stretch_width")
-
-ICON_URLS = {
-    "brand-github": "https://github.com/holoviz/panel",
-    "brand-twitter": "https://twitter.com/Panel_Org",
-    "brand-linkedin": "https://www.linkedin.com/company/panel-org",
-    "message-circle": "https://discourse.holoviz.org/",
-    "brand-discord": "https://discord.gg/AXRHnJU6sP",
-}
-
-
-async def random_url(_):
-    pet = random.choice(["cat", "dog"])
-    api_url = f"https://api.the{pet}api.com/v1/images/search"
-    async with aiohttp.ClientSession() as session:
-        async with session.get(api_url) as resp:
-            return (await resp.json())[0]["url"]
-
-
-@pn.cache
-def load_processor_model(
-    processor_name: str, model_name: str
-) -> Tuple[CLIPProcessor, CLIPModel]:
-    processor = CLIPProcessor.from_pretrained(processor_name)
-    model = CLIPModel.from_pretrained(model_name)
-    return processor, model
-
-
-async def open_image_url(image_url: str) -> Image:
-    async with aiohttp.ClientSession() as session:
-        async with session.get(image_url) as resp:
-            return Image.open(io.BytesIO(await resp.read()))
-
-
-def get_similarity_scores(class_items: List[str], image: Image) -> List[float]:
-    processor, model = load_processor_model(
-        "openai/clip-vit-base-patch32", "openai/clip-vit-base-patch32"
-    )
-    inputs = processor(
-        text=class_items,
-        images=[image],
-        return_tensors="pt",  # pytorch tensors
-    )
-    outputs = model(**inputs)
-    logits_per_image = outputs.logits_per_image
-    class_likelihoods = logits_per_image.softmax(dim=1).detach().numpy()
-    return class_likelihoods[0]
-
-
-async def process_inputs(class_names: List[str], image_url: str):
-    """
-    High level function that takes in the user inputs and returns the
-    classification results as panel objects.
-    """
-    try:
-        main.disabled = True
-        if not image_url:
-            yield "##### ⚠️ Provide an image URL"
-            return
-
-        yield "##### ⚙ Fetching image and running model..."
-        try:
-            pil_img = await open_image_url(image_url)
-            img = pn.pane.Image(pil_img, height=400, align="center")
-        except Exception as e:
-            yield f"##### 😔 Something went wrong, please try a different URL!"
-            return
-
-        class_items = class_names.split(",")
-        class_likelihoods = get_similarity_scores(class_items, pil_img)
-
-        # build the results column
-        results = pn.Column("##### 🎉 Here are the results!", img)
-
-        for class_item, class_likelihood in zip(class_items, class_likelihoods):
-            row_label = pn.widgets.StaticText(
-                name=class_item.strip(), value=f"{class_likelihood:.2%}", align="center"
-            )
-            row_bar = pn.indicators.Progress(
-                value=int(class_likelihood * 100),
-                sizing_mode="stretch_width",
-                bar_color="secondary",
-                margin=(0, 10),
-                design=pn.theme.Material,
-            )
-            results.append(pn.Column(row_label, row_bar))
-        yield results
-    finally:
-        main.disabled = False
-
-
-# create widgets
-randomize_url = pn.widgets.Button(name="Randomize URL", align="end")
-
-image_url = pn.widgets.TextInput(
-    name="Image URL to classify",
-    value=pn.bind(random_url, randomize_url),
-)
-class_names = pn.widgets.TextInput(
-    name="Comma separated class names",
-    placeholder="Enter possible class names, e.g. cat, dog",
-    value="cat, dog, parrot",
-)
-
-input_widgets = pn.Column(
-    "##### 😊 Click randomize or paste a URL to start classifying!",
-    pn.Row(image_url, randomize_url),
-    class_names,
-)
-
-# add interactivity
-interactive_result = pn.panel(
-    pn.bind(process_inputs, image_url=image_url, class_names=class_names),
-    height=600,
-)
-
-# add footer
-footer_row = pn.Row(pn.Spacer(), align="center")
-for icon, url in ICON_URLS.items():
-    href_button = pn.widgets.Button(icon=icon, width=35, height=35)
-    href_button.js_on_click(code=f"window.open('{url}')")
-    footer_row.append(href_button)
-footer_row.append(pn.Spacer())
-
-# create dashboard
-main = pn.WidgetBox(
-    input_widgets,
-    interactive_result,
-    footer_row,
-)
-
-title = "Panel Demo - Image Classification"
-pn.template.BootstrapTemplate(
-    title=title,
-    main=main,
-    main_max_width="min(50%, 698px)",
-    header_background="#F08080",
-).servable(title=title)
+import dash
+from dash import dcc, html
+from dash.dependencies import Input, Output
+import plotly.express as px
+import pandas as pd
+
+
+layers = ["conv2d5", "conv2d6", "conv2d7", "conv2d8", "conv2d9", "linear1", "linear2"]
+color_dict = {
+    "ClassifierA_Cr203_C6": "#B3262A",
+    "ClassifierA_test_stim": "#2f559a",
+    "ClassifierA_test_unstim": "#5AADC5",
+}
+
+# Initialize Dash app
+app = dash.Dash(__name__)
+
+# App layout
+app.layout = html.Div(
+    [
+        html.Div(
+            id="plot-container",
+            children=[
+                html.P(
+                    children=[
+                        "The original report can be accessed through this ",
+                        html.A(
+                            "BioRxiv Link",
+                            href="https://www.biorxiv.org/content/10.1101/2023.06.01.542416v1.full.pdf",
+                            target="_blank",
+                        ),
+                        " and the data through this ",
+                        html.A(
+                            "GitHub repository",
+                            href="https://github.com/MannLabs/SPARCS_pub_figures",
+                            target="_blank",
+                        ),
+                    ],
+                    id="initial-announcement",
+                ),
+                html.P(
+                    "Please select a layer to view from the dropdown.",
+                    id="initial-message",
+                ),
+            ],
+        ),
+        dcc.Dropdown(
+            id="layer-dropdown",
+            options=[{"label": layer, "value": layer} for layer in layers],
+            value=layers[0],
+        ),
+        dcc.Graph(id="umap-plot"),
+    ]
+)
+
+
+# Callback to update the plot based on dropdown selection
+@app.callback(Output("umap-plot", "figure"), [Input("layer-dropdown", "value")])
+def update_plot(selected_layer):
+
+    input_path = (
+        f"data/classifier_1_Test_Data/UMAP_data/Raw_data_UMAP_{selected_layer}.csv"
+    )
+    df_train_umap = pd.read_csv(input_path)
+
+    fig = px.scatter(
+        df_train_umap.sample(frac=1, random_state=19),
+        x="UMAP_1",
+        y="UMAP_2",
+        color="class_label",
+        color_discrete_map=color_dict,
+        title=f"UMAP Test Data Labels - {selected_layer}",
+    )
+    fig.update_traces(
+        marker=dict(size=4, opacity=1, line=dict(width=0)),
+        selector=dict(mode="markers"),
+    )
+
+    # Update layout to have equal aspect ratio
+    fig.update_layout(
+        xaxis=dict(
+            scaleanchor="y",
+            scaleratio=1,
+            dtick=2,
+        ),
+        yaxis=dict(
+            scaleanchor="x",
+            scaleratio=1,
+            dtick=2,
+        ),
+        legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
+    )
+
+    # Set a constant range for both axes
+    umap_min = min(df_train_umap["UMAP_1"].min(), df_train_umap["UMAP_2"].min())
+    umap_max = max(df_train_umap["UMAP_1"].max(), df_train_umap["UMAP_2"].max())
+
+    x_range = [umap_min - 2, umap_max + 2]
+    # y_range = [umap_min - 2, umap_max + 2]
+
+    fig.update_xaxes(range=x_range, constrain="domain")
+    # fig.update_yaxes(range=y_range, scaleanchor="x", scaleratio=1)
+
+    return fig
+
+
+if __name__ == "__main__":
+    app.run_server(debug=True)
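
For reference, here is a minimal standalone sketch of what the new update_plot callback renders for a single layer, without starting the Dash server. It assumes the CSV layout used in the diff (data/classifier_1_Test_Data/UMAP_data/Raw_data_UMAP_<layer>.csv with UMAP_1, UMAP_2, and class_label columns); the chosen layer and the output filename are arbitrary and not part of the commit.

# Standalone sketch (not part of the commit): build one layer's UMAP scatter
# the same way the Dash callback does, then save it to an HTML file.
# Assumptions: CSV path pattern and the UMAP_1 / UMAP_2 / class_label columns
# come from the diff above; the layer choice and output name are illustrative.
import pandas as pd
import plotly.express as px

layer = "conv2d5"  # any entry from the `layers` list in app.py
df = pd.read_csv(f"data/classifier_1_Test_Data/UMAP_data/Raw_data_UMAP_{layer}.csv")

fig = px.scatter(
    df.sample(frac=1, random_state=19),  # shuffle rows so no class is drawn entirely on top
    x="UMAP_1",
    y="UMAP_2",
    color="class_label",
    title=f"UMAP Test Data Labels - {layer}",
)
fig.write_html(f"umap_{layer}.html")  # open in a browser; no Dash server needed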