ludusc committed on
Commit 7a3450e · 1 Parent(s): 7764300

tried to change to dropdown

Home.py CHANGED
@@ -21,8 +21,9 @@ st.write(intro_text)
 
 # 4 PAGES
 st.subheader('Pages')
-sections_text = """Overall, there are 4 features in this web app:
+sections_text = """Overall, there are x features in this web app:
 1) Disentanglement visualizer
+2) Concept vectors comparison and aggregation
 ...
 """
 st.write(sections_text)
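
For reference, Streamlit builds this app's sidebar navigation from the scripts under pages/: each file becomes a page, and the numeric prefix only sets the ordering before being stripped from the label. A rough sketch of the layout this commit assumes (comments only, file names taken from the diffs below):

# Streamlit multipage layout assumed by this commit:
#
#   Home.py                          -> landing page (the file above)
#   pages/1_Disentanglement.py       -> "Disentanglement"
#   pages/2_Concepts_comparison.py   -> "Concepts comparison" (renamed from pages/2_todo.py)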
pages/1_Disentanglement.py CHANGED
@@ -20,18 +20,14 @@ SECONDARY_COLOR = '#bce7db'
 
 st.title('Disentanglement studies')
 st.write('> **What concepts can be disentangled in the latent space of a model?**')
-st.write("""Explain more in depth""")
+st.write("""Explanation on the functionalities to come.""")
 
 instruction_text = """Instruction to input:
-1. Choosing image: Users can choose a specific image by entering **Image ID** and hit the _Choose the defined image_ button or can generate an image randomly by hitting the _Generate a random image_ button.
-2. Choosing epsilon: **Epsilon** is the amount of perturbation on the original image under attack. The higher the epsilon is, the more pertubed the image is, the more confusion made to the model.
-Users can choose a specific epsilon by engtering **Epsilon** and hit the _Choose the defined epsilon_ button.
-Users can also let the algorithm find the smallest epsilon automatically by hitting the _Find the smallest epsilon automatically_ button.
-The underlying algorithm will iterate through a set of epsilon in ascending order until reaching the **maximum value of epsilon**.
-After each iteration, the epsilon will increase by an amount equal to **step** variable.
-Users can change the default values of the two variable value optionally.
+1. Choosing concept: Users pick the concept to disentangle from the **Concept** dropdown and confirm with the _Choose the defined concept_ button.
+2. Choosing image: Users can choose a specific image by entering an **Image ID** and hitting the _Choose the defined image_ button, or can generate an image randomly by hitting the _Generate a random image_ button.
+3. Choosing epsilon: **Epsilon** is the amount of translation (lambda) along the disentangled concept axis. A negative epsilon changes the image in the direction of the concept, a positive one pushes the image away from the concept.
 """
-st.write("To use the functionality below, users need to input the **image** and the **epsilon**.")
+st.write("To use the functionality below, users need to input the **concept** to disentangle, an **image** ID and the **epsilon** of variation along the disentangled axis.")
 with st.expander("See more instruction", expanded=False):
     st.write(instruction_text)
 
@@ -42,6 +38,7 @@ with open(annotations_file, 'rb') as f:
 
 ann_df = pd.read_csv('./data/annotated_files/sim_seeds0000-10000.csv')
 concepts = './data/concepts.txt'
+
 with open(concepts) as f:
     labels = [line.strip() for line in f.readlines()]
 
@@ -62,16 +59,17 @@ with input_col_1:
 
     # image_id = st.number_input('Image ID: ', format='%d', step=1)
     st.write('**Choose a concept to disentangle**')
-    chosen_text_id_input = st.empty()
-    concept_id = chosen_text_id_input.text_input('Concept:', value=st.session_state.concept_id)
-
+    # chosen_text_id_input = st.empty()
+    # concept_id = chosen_text_id_input.text_input('Concept:', value=st.session_state.concept_id)
+    concept_id = st.selectbox('Concept:', tuple(labels), value=st.session_state.concept_id)
+
     choose_text_button = st.form_submit_button('Choose the defined concept')
-    random_text = st.form_submit_button('Select a random concept')
+    # random_text = st.form_submit_button('Select a random concept')
 
-    if random_text:
-        concept_id = random.choice(labels)
-        st.session_state.concept_id = concept_id
-        chosen_text_id_input.text_input('Concept:', value=st.session_state.concept_id)
+    # if random_text:
+    #     concept_id = random.choice(labels)
+    #     st.session_state.concept_id = concept_id
+    #     chosen_text_id_input.text_input('Concept:', value=st.session_state.concept_id)
 
     if choose_text_button:
        concept_id = str(concept_id)
@@ -103,7 +101,7 @@ smoothgrad_col_1, smoothgrad_col_2, smoothgrad_col_3, smoothgrad_col_4, smoothgr
 with output_col_1:
     separation_vector, number_important_features = get_separation_space(concept_id, annotations, ann_df)
     # st.write(f'Class ID {input_id} - {input_label}: {pred_prob*100:.3f}% confidence')
-    st.write('Separation vector', separation_vector)
+    st.write('Concept vector', separation_vector)
     header_col_1.write(f'Concept {concept_id} - Number of relevant nodes: {number_important_features}')
 
 # ----------------------------- INPUT column 2 & 3 ----------------------------
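
One note on the dropdown introduced above: st.selectbox has no value keyword; the preselected entry is set with index. A minimal sketch of the same pattern, assuming a stand-in labels list in place of the lines read from data/concepts.txt and the concept_id key the page already keeps in st.session_state:

import streamlit as st

# Hedged sketch, not the app's actual code: preselect the stored concept via
# `index` and keep st.session_state in sync with the current selection.
labels = ['concept_a', 'concept_b', 'concept_c']   # stand-in for data/concepts.txt
if 'concept_id' not in st.session_state:
    st.session_state.concept_id = labels[0]

default_index = labels.index(st.session_state.concept_id) if st.session_state.concept_id in labels else 0
concept_id = st.selectbox('Concept:', labels, index=default_index)
st.session_state.concept_id = concept_id

In the page itself the widget sits inside a form, so the chosen value is only read back when the _Choose the defined concept_ button submits the form.

The new instruction text describes epsilon as a translation along the disentangled concept axis. Assuming the separation vector returned by get_separation_space is that axis in latent space, the edit it describes would look roughly like the sketch below; the sign convention (negative epsilon toward the concept) is the page's own wording, not something verified here.

import numpy as np

def edit_latent(w: np.ndarray, separation_vector: np.ndarray, epsilon: float) -> np.ndarray:
    # Normalize the concept direction, then translate the latent code by epsilon along it.
    direction = separation_vector / np.linalg.norm(separation_vector)
    return w + epsilon * direction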
pages/{2_todo.py → 2_Concepts_comparison.py} RENAMED
@@ -6,21 +6,18 @@ import plotly.graph_objects as go
 
 import graphviz
 
-from backend.maximally_activating_patches import load_layer_infos, load_activation, get_receptive_field_coordinates
+#from backend.maximally_activating_patches import load_layer_infos, load_activation, get_receptive_field_coordinates
 from frontend import on_click_graph
-from backend.utils import load_dataset_dict
+#from backend.utils import load_dataset_dict
 
 HIGHTLIGHT_COLOR = '#e7bcc5'
 st.set_page_config(layout='wide')
 
 
-st.title('Maximally activating image patches')
-st.write('> **What patterns maximally activate this channel in ConvNeXt model?**')
-st.write("""The maximally activating image patches method is a technique used in visualizing the interpretation of convolutional neural networks.
-It works by identifying the regions of the input image that activate a particular neuron in the convolutional layer,
-thus revealing the features that the neuron is detecting. To achieve this, the method generates image patches then feeds into the model while monitoring the neuron's activation.
-The algorithm then selects the patch that produces the highest activation and overlays it on the original image to visualize the features that the neuron is responding to.
-""")
+st.title('Comparison among concept vectors')
+st.write('> **How do the concept vectors relate to each other?**')
+st.write('> **What is their joint impact on the image?**')
+st.write("""Description to write""")
 
 # -------------------------- LOAD DATASET ---------------------------------
 dataset_dict = load_dataset_dict()
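
The renamed page currently only sets up its title and guiding questions (and the unchanged dataset_dict = load_dataset_dict() call further down still depends on the backend.utils import that this commit comments out). As a hedged sketch of what comparing concept vectors and measuring their joint impact could look like, using hypothetical helpers that are not in the repository:

import numpy as np

def cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:
    # 1.0 means the two concept (separation) vectors point the same way, 0.0 means orthogonal.
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))

def joint_edit(w: np.ndarray, v1: np.ndarray, v2: np.ndarray, eps1: float, eps2: float) -> np.ndarray:
    # One simple notion of joint impact: shift the latent code along both normalized directions.
    return w + eps1 * v1 / np.linalg.norm(v1) + eps2 * v2 / np.linalg.norm(v2)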