Ubuntu committed
Commit 8c3e286
Parents: 82fac6c 1cb3b5a

Merge branch 'main' of https://huggingface.co/spaces/ludusc/latent-space-theories into main

Home.py CHANGED
@@ -6,24 +6,28 @@ st.set_page_config(layout='wide')
 
 st.title('About')
 
+st.subheader('General aim of the Ph.D. (to be updated)')
 # INTRO
-intro_text = """This project investigates the nature and nurture of latent spaces, with the aim of formulating a theory of this particular vectorial space. It draws together reflections on the inherent constraints of latent spaces in particular architectures and considers the learning-specific features that emerge.
+intro_text = """
+This project investigates the nature and nurture of latent spaces, with the aim of formulating a theory of this particular vectorial space. It draws together reflections on the inherent constraints of latent spaces in particular architectures and considers the learning-specific features that emerge.
 The thesis concentrates mostly on the second part, exploring different avenues for understanding the space. Using a multitude of vision generative models, it discusses possibilities for the systematic exploration of space, including disentanglement properties and coverage of various guidance methods.
 It also explores the possibility of comparison across latent spaces and investigates the differences and commonalities across different learning experiments. Furthermore, the thesis investigates the role of stochasticity in newer models.
 As a case study, this thesis adopts art historical data, spanning classic art, photography, and modern and contemporary art.
-
-The project aims to interpret the StyleGAN2 model by several techniques.
-> “What concepts are disentangled in the latent space of StyleGAN2”\n
-> “Can we quantify the complexity of such concepts?”.
-
 """
 st.write(intro_text)
+st.subheader('On this experiment')
+st.write(
+    """The project aims to interpret the StyleGAN3 model trained on Textiles using disentanglement methods.
+    > “What features are disentangled in the latent space of StyleGAN3”\n
+    > “Can we quantify the complexity, quality and relations of such features?”.
+    """)
 
 # 4 PAGES
 st.subheader('Pages')
-sections_text = """Overall, there are x features in this web app:
-1) Disentanglement visualizer
-2) Concept vectors comparison and aggregation
+sections_text = """Overall, there are 3 features in this web app:
+1) Textiles manipulation
+2) Features comparison
+3) Vectors algebra manipulation
 ...
 """
 st.write(sections_text)
backend/disentangle_concepts.py CHANGED
@@ -7,7 +7,7 @@ from PIL import Image
 
 
 
-def generate_composite_images(model, z, decision_boundaries, lambdas, latent_space='W'):
+def generate_composite_images(model, z, decision_boundaries, lambdas, latent_space='W', negative_colors=None):
     """
     The regenerate_images function takes a model, z, and decision_boundary as input. It then
     constructs an inverse rotation/translation matrix and passes it to the generator. The generator
@@ -33,9 +33,19 @@ def generate_composite_images(model, z, decision_boundaries, lambdas, latent_spa
     repetitions = 16
     z_0 = z
 
-    for decision_boundary, lmbd in zip(decision_boundaries, lambdas):
-        decision_boundary = torch.from_numpy(decision_boundary.copy()).to(device)
-        z_0 = z_0 + int(lmbd) * decision_boundary
+    if negative_colors:
+        for decision_boundary, lmbd, neg_boundary in zip(decision_boundaries, lambdas, negative_colors):
+            decision_boundary = torch.from_numpy(decision_boundary.copy()).to(device)
+            if neg_boundary != 'None':
+                neg_boundary = torch.from_numpy(neg_boundary.copy()).to(device)
+
+                z_0 = z_0 + int(lmbd) * (decision_boundary - (neg_boundary.T * decision_boundary) * neg_boundary)
+            else:
+                z_0 = z_0 + int(lmbd) * decision_boundary
+    else:
+        for decision_boundary, lmbd in zip(decision_boundaries, lambdas):
+            decision_boundary = torch.from_numpy(decision_boundary.copy()).to(device)
+            z_0 = z_0 + int(lmbd) * decision_boundary
 
 
     if latent_space == 'Z':
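For reference, the new `negative_colors` branch is meant to implement the conditional edit described on the Vectors algebra page: move along a color boundary while projecting out a second, protected direction (n1 − (n1ᵀn2) n2). A minimal NumPy sketch of that rule, assuming unit-norm boundary vectors (illustrative only, not the exact tensor code above):

```python
import numpy as np

def conditional_step(z, boundary, lmbd, neg_boundary=None):
    """Move z along `boundary` by `lmbd`; if `neg_boundary` is given,
    first remove the component of `boundary` that lies along it."""
    direction = boundary
    if neg_boundary is not None:
        # projected direction: n1 - (n1 . n2) n2
        direction = boundary - np.dot(boundary, neg_boundary) * neg_boundary
    return z + lmbd * direction

# Toy check: the step has no component along the excluded direction.
n1 = np.array([0.6, 0.8, 0.0])
n2 = np.array([0.0, 1.0, 0.0])
z_new = conditional_step(np.zeros(3), n1, 5, neg_boundary=n2)
assert np.isclose(np.dot(z_new, n2), 0.0)
```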
pages/{1_Textiles_Disentanglement.py → 1_Textiles_Manipulation.py} RENAMED
@@ -20,10 +20,14 @@ BACKGROUND_COLOR = '#bcd0e7'
 SECONDARY_COLOR = '#bce7db'
 
 
-st.title('Disentanglement studies on the Textile Dataset')
+st.title('Disentanglement on Textile Datasets')
 st.markdown(
     """
-    This is a demo of the Disentanglement studies on the [iMET Textiles Dataset](https://www.metmuseum.org/art/collection/search/85531).
+    This is a demo of the Disentanglement experiment on the [iMET Textiles Dataset](https://www.metmuseum.org/art/collection/search/85531).
+
+    In this page, the user can adjust the colors of textile images generated by an AI by simply traversing the latent space of the AI.
+    The colors can be adjusted following the human-intuitive encoding of HSV, adjusting the main Hue of the image with an option of 7 colors + Gray,
+    the saturation (the amount of Gray) and the value of the image (the amount of Black).
     """,
     unsafe_allow_html=False,)
 
@@ -73,9 +77,6 @@ if 'num_factors' not in st.session_state:
 if 'best' not in st.session_state:
     st.session_state.best = True
 
-# def on_change_random_input():
-#     st.session_state.image_id = st.session_state.image_id
-
 # ----------------------------- INPUT ----------------------------------
 st.header('Input')
 input_col_1, input_col_2, input_col_3, input_col_4 = st.columns(4)
@@ -83,8 +84,7 @@ input_col_1, input_col_2, input_col_3, input_col_4 = st.columns(4)
 
 with input_col_1:
     with st.form('image_form'):
 
-        # image_id = st.number_input('Image ID: ', format='%d', step=1)
-        st.write('**Choose or generate a random image to test the disentanglement**')
+        st.write('**Choose or generate a random base image**')
         chosen_image_id_input = st.empty()
         image_id = chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)
@@ -103,16 +103,15 @@ with input_col_1:
 with input_col_2:
     with st.form('text_form_1'):
 
-        st.write('**Choose color to vary**')
-        type_col = st.selectbox('Color:', tuple(COLORS_LIST), index=7)
-        colors_button = st.form_submit_button('Choose the defined color')
+        st.write('**Choose hue to vary**')
+        type_col = st.selectbox('Hue:', tuple(COLORS_LIST), index=7)
 
         st.write('**Set range of change**')
         chosen_color_lambda_input = st.empty()
         color_lambda = chosen_color_lambda_input.number_input('Lambda:', min_value=-100, step=1, value=7)
-        color_lambda_button = st.form_submit_button('Choose the defined lambda for color')
+        color_lambda_button = st.form_submit_button('Choose the defined hue and lambda')
 
-        if colors_button or color_lambda_button:
+        if color_lambda_button:
            st.session_state.image_id = image_id
            st.session_state.concept_ids = type_col
            st.session_state.color_lambda = color_lambda
@@ -121,24 +120,28 @@ with input_col_2:
 with input_col_3:
     with st.form('text_form'):
 
-        st.write('**Saturation variation**')
+        st.write('**Choose saturation variation**')
         chosen_saturation_lambda_input = st.empty()
         saturation_lambda = chosen_saturation_lambda_input.number_input('Lambda:', min_value=-100, step=1, key=0, value=0)
-        saturation_lambda_button = st.form_submit_button('Choose the defined lambda for saturation')
 
-        st.write('**Value variation**')
+        st.write('**Choose value variation**')
         chosen_value_lambda_input = st.empty()
         value_lambda = chosen_value_lambda_input.number_input('Lambda:', min_value=-100, step=1, key=1, value=0)
-        value_lambda_button = st.form_submit_button('Choose the defined lambda for salue')
+        value_lambda_button = st.form_submit_button('Choose the defined lambda for value and saturation')
 
-        if saturation_lambda_button or value_lambda_button:
+        if value_lambda_button:
            st.session_state.saturation_lambda = int(saturation_lambda)
            st.session_state.value_lambda = int(value_lambda)
 
 with input_col_4:
     with st.form('text_form_2'):
-        st.write('Use best options')
+        st.write('Use the best vectors (after hyperparameter tuning)')
         best = st.selectbox('Option:', tuple([True, False]), index=0)
+        sign = True
+        num_factors=10
+        cl_method='LR'
+        regularization=0.1
+        extremes=True
         if st.session_state.best is False:
             st.write('Options for StyleSpace (not available for Saturation and Value)')
             sign = st.selectbox('Sign option:', tuple([True, False]), index=1)
@@ -150,10 +153,6 @@ with input_col_4:
             extremes = st.selectbox('Extremes option:', tuple([True, False]), index=1)
 
         choose_options_button = st.form_submit_button('Choose the defined options')
-        # st.write('**Choose a latent space to disentangle**')
-        # # chosen_text_id_input = st.empty()
-        # # concept_id = chosen_text_id_input.text_input('Concept:', value=st.session_state.concept_id)
-        # space_id = st.selectbox('Space:', tuple(['Z', 'W']))
         if choose_options_button:
             st.session_state.best = best
             if st.session_state.best is False:
@@ -181,7 +180,7 @@ with input_col_4:
 # ---------------------------- SET UP OUTPUT ------------------------------
 epsilon_container = st.empty()
 st.header('Image Manipulation')
-st.subheader('Using selected directions')
+st.write('Using selected vectors to modify the original image...')
 
 header_col_1, header_col_2 = st.columns([1,1])
 output_col_1, output_col_2 = st.columns([1,1])
@@ -196,7 +195,7 @@ output_col_1, output_col_2 = st.columns([1,1])
 
 # ---------------------------- DISPLAY COL 1 ROW 1 ------------------------------
 with header_col_1:
-    st.write(f'Original image')
+    st.write(f'### Original image')
 
 with header_col_2:
     if st.session_state.best:
@@ -212,8 +211,15 @@ with header_col_2:
         tmp_sat = concept_vectors[concept_vectors['color'] == 'Saturation'][concept_vectors['extremes'] == st.session_state.extremes]
         saturation_separation_vector, performance_saturation = tmp_sat.reset_index().loc[0, ['vector', 'score']]
 
-    st.write(f'Change in {st.session_state.concept_ids} of {np.round(st.session_state.color_lambda, 2)}, in saturation of {np.round(st.session_state.saturation_lambda, 2)}, in value of {np.round(st.session_state.value_lambda, 2)}. - Performance color vector: {performance_color}, saturation vector: {performance_saturation/100}, value vector: {performance_value/100}')
-
+    st.write('### Modified image')
+    st.write(f"""
+             Change in hue: {st.session_state.concept_ids} of amount: {np.round(st.session_state.color_lambda, 2)},
+             in: saturation of amount: {np.round(st.session_state.saturation_lambda, 2)},
+             in: value of amount: {np.round(st.session_state.value_lambda, 2)}.\
+             Verification performance of hue vector: {performance_color},
+             saturation vector: {performance_saturation/100},
+             value vector: {performance_value/100}""")
+
 # ---------------------------- DISPLAY COL 2 ROW 1 ------------------------------
 
 if st.session_state.space_id == 'Z':
@@ -229,3 +235,4 @@ with output_col_1:
 with output_col_2:
     image_updated = generate_composite_images(model, original_image_vec, [color_separation_vector, saturation_separation_vector, value_separation_vector], lambdas=[st.session_state.color_lambda, st.session_state.saturation_lambda, st.session_state.value_lambda])
     st.image(image_updated)
+
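As the page text above explains, the hue, saturation and value inputs all drive one latent traversal: each chosen separation vector is scaled by its lambda and added to the image's w vector before regeneration (this is what `generate_composite_images` does when `negative_colors` is not set). A small illustrative sketch, with placeholder argument names:

```python
import numpy as np

def hsv_edit(w, hue_vec, sat_vec, val_vec, hue_l, sat_l, val_l):
    """Sketch: w' = w + lambda_hue*v_hue + lambda_sat*v_sat + lambda_val*v_val."""
    return (w + hue_l * np.asarray(hue_vec)
              + sat_l * np.asarray(sat_vec)
              + val_l * np.asarray(val_vec))

# e.g. push the selected hue by 7 while leaving saturation and value untouched:
# w_edited = hsv_edit(original_image_vec, color_separation_vector,
#                     saturation_separation_vector, value_separation_vector, 7, 0, 0)
```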
pages/{2_Colours_comparison.py → 2_Network_comparison.py} RENAMED
@@ -24,7 +24,11 @@ st.set_page_config(layout='wide')
 
 st.title('Comparison among color directions')
 st.write('> **How do the color directions relate to each other?**')
-st.write('> **What is their joint impact on the image?**')
+st.write("""
+         This page provides a simple network-based framework to inspect the vector similarity (cosine similarity) among the found color vectors.
+         The nodes are the colors chosen for comparison and the strength of the edge represents the similarity.
+
+         """)
 
 
 annotations_file = './data/textile_annotated_files/seeds0000-100000_S.pkl'
@@ -46,10 +50,8 @@ with dnnlib.util.open_url('./data/textile_model_files/network-snapshot-005000.pk
 
 COLORS_LIST = ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue', 'Blue', 'Purple', 'Pink', 'Saturation', 'Value']
 
-if 'image_id' not in st.session_state:
-    st.session_state.image_id = 0
 if 'concept_ids' not in st.session_state:
-    st.session_state.concept_ids = [COLORS_LIST[-1], COLORS_LIST[-2], ]
+    st.session_state.concept_ids = COLORS_LIST
 if 'sign' not in st.session_state:
     st.session_state.sign = False
 if 'extremes' not in st.session_state:
@@ -60,10 +62,8 @@ if 'cl_method' not in st.session_state:
     st.session_state.cl_method = False
 if 'num_factors' not in st.session_state:
     st.session_state.num_factors = False
-
-
-if 'space_id' not in st.session_state:
-    st.session_state.space_id = 'W'
+if 'best' not in st.session_state:
+    st.session_state.best = True
 
 # ----------------------------- INPUT ----------------------------------
 st.header('Input')
@@ -76,7 +76,7 @@ with input_col_1:
         st.write('**Choose a series of colors to compare**')
         # chosen_text_id_input = st.empty()
         # concept_id = chosen_text_id_input.text_input('Concept:', value=st.session_state.concept_id)
-        concept_ids = st.multiselect('Color (including Saturation and Value):', tuple(COLORS_LIST), default=[COLORS_LIST[-1], COLORS_LIST[-2], ])
+        concept_ids = st.multiselect('Color (including Saturation and Value):', tuple(COLORS_LIST), default=COLORS_LIST)
         choose_text_button = st.form_submit_button('Choose the defined colors')
 
         if choose_text_button:
@@ -85,27 +85,33 @@ with input_col_1:
 
 with input_col_2:
     with st.form('text_form_1'):
-        st.write('Options for StyleSpace (not available for Saturation and Value)')
-        sign = st.selectbox('Sign option:', tuple([True, False]), index=1)
-        num_factors = st.selectbox('Number of factors option:', tuple([1, 5, 10, 20, False]), index=4)
-        st.write('Options for InterFaceGAN (not available for Saturation and Value)')
-        cl_method = st.selectbox('Classification method option:', tuple(['LR', 'SVM', False]), index=2)
-        regularization = st.selectbox('Regularization option:', tuple([0.1, 1.0, False]), index=2)
-        st.write('Options for InterFaceGAN (only for Saturation and Value)')
-        extremes = st.selectbox('Extremes option:', tuple([True, False]), index=1)
-
+        st.write('Use the best vectors (after hyperparameter tuning)')
+        best = st.selectbox('Option:', tuple([True, False]), index=0)
+        sign = True
+        num_factors=10
+        cl_method='LR'
+        regularization=0.1
+        extremes=True
+        if st.session_state.best is False:
+            st.write('Options for StyleSpace (not available for Saturation and Value)')
+            sign = st.selectbox('Sign option:', tuple([True, False]), index=1)
+            num_factors = st.selectbox('Number of factors option:', tuple([1, 5, 10, 20, False]), index=4)
+            st.write('Options for InterFaceGAN (not available for Saturation and Value)')
+            cl_method = st.selectbox('Classification method option:', tuple(['LR', 'SVM', False]), index=2)
+            regularization = st.selectbox('Regularization option:', tuple([0.1, 1.0, False]), index=2)
+            st.write('Options for InterFaceGAN (only for Saturation and Value)')
+            extremes = st.selectbox('Extremes option:', tuple([True, False]), index=1)
+
         choose_options_button = st.form_submit_button('Choose the defined options')
-        # st.write('**Choose a latent space to disentangle**')
-        # # chosen_text_id_input = st.empty()
-        # # concept_id = chosen_text_id_input.text_input('Concept:', value=st.session_state.concept_id)
-        # space_id = st.selectbox('Space:', tuple(['Z', 'W']))
         if choose_options_button:
-            st.session_state.sign = sign
-            st.session_state.num_factors = num_factors
-            st.session_state.cl_method = cl_method
-            st.session_state.regularization = regularization
-            st.session_state.extremes = extremes
-
+            st.session_state.best = best
+            if st.session_state.best is False:
+                st.session_state.sign = sign
+                st.session_state.num_factors = num_factors
+                st.session_state.cl_method = cl_method
+                st.session_state.regularization = regularization
+                st.session_state.extremes = extremes
+
 # ---------------------------- SET UP OUTPUT ------------------------------
 epsilon_container = st.empty()
 st.header('Comparison')
@@ -115,23 +121,28 @@ header_col_1, header_col_2 = st.columns([3,1])
 output_col_1, output_col_2 = st.columns([3,1])
 
 # ---------------------------- DISPLAY COL 1 ROW 1 ------------------------------
-tmp = concept_vectors[concept_vectors['color'].isin(st.session_state.concept_ids)]
-tmp = tmp[tmp['sign'] == st.session_state.sign][tmp['extremes'] == st.session_state.extremes][tmp['num_factors'] == st.session_state.num_factors][tmp['cl_method'] == st.session_state.cl_method][tmp['regularization'] == st.session_state.regularization]
+if st.session_state.best:
+    tmp = concept_vectors[concept_vectors['color'].isin(st.session_state.concept_ids)].groupby('color').first().reset_index()
+else:
+    tmp = concept_vectors[concept_vectors['color'].isin(st.session_state.concept_ids)]
+    tmp = tmp[tmp['sign'] == st.session_state.sign][tmp['extremes'] == st.session_state.extremes][tmp['num_factors'] == st.session_state.num_factors][tmp['cl_method'] == st.session_state.cl_method][tmp['regularization'] == st.session_state.regularization]
+
 info = tmp.loc[:, ['vector', 'score', 'color', 'kwargs']].values
 concept_ids = [i[2] for i in info] #+ ' ' + i[3]
 
 with header_col_1:
-    st.write('Similarity graph')
+    st.write('### Similarity graph')
 
 with header_col_2:
-    st.write('Information')
+    st.write('### Information')
 
 with output_col_2:
     for i,concept_id in enumerate(concept_ids):
-        st.write(f'Color {info[i][2]} - Settings: {info[i][3]} Performance of the color vector: {info[i][1]}')# - Nodes {",".join(list(imp_nodes))}')
+        st.write(f'''Color: {info[i][2]}.\
+                 Settings: {info[i][3]}\
+                 ''')
 
 with output_col_1:
-
     edges = []
     for i in range(len(concept_ids)):
         for j in range(len(concept_ids)):
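The page description above says the nodes are the selected colors and the edge strength is the cosine similarity between their vectors; the `edges` loop at the end of the hunk presumably fills in values of that form. A self-contained sketch of such an edge computation (the names here are illustrative, not the app's exact variables):

```python
import numpy as np

def cosine_similarity_edges(names, vectors):
    """Return (color_i, color_j, cosine similarity) for every unordered pair."""
    edges = []
    for i in range(len(names)):
        for j in range(i + 1, len(names)):  # one edge per pair
            a, b = np.asarray(vectors[i]), np.asarray(vectors[j])
            sim = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
            edges.append((names[i], names[j], sim))
    return edges

# e.g. edges = cosine_similarity_edges(concept_ids, tmp['vector'].tolist())
```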
pages/3_Vectors_algebra.py ADDED
@@ -0,0 +1,163 @@
+import streamlit as st
+import pickle
+import pandas as pd
+import numpy as np
+import random
+import torch
+
+from matplotlib.backends.backend_agg import RendererAgg
+
+from backend.disentangle_concepts import *
+import torch_utils
+import dnnlib
+import legacy
+
+_lock = RendererAgg.lock
+
+
+st.set_page_config(layout='wide')
+BACKGROUND_COLOR = '#bcd0e7'
+SECONDARY_COLOR = '#bce7db'
+
+
+st.title('Vector algebra using disentangled vectors')
+st.markdown(
+    """
+    This page offers the possibility to edit the colors of a given textile image using vector algebra and projections.
+    It allows to select several colors to move towards and against (selecting a positive or negative lambda).
+    Furthermore, it offers the possibility of conditional manipulation, by moving in the direction of a color n1 without affecting the color n2.
+    This is done using a projected direction n1 - (n1.T n2) n2.
+    """,
+    unsafe_allow_html=False,)
+
+annotations_file = './data/textile_annotated_files/seeds0000-100000_S.pkl'
+with open(annotations_file, 'rb') as f:
+    annotations = pickle.load(f)
+
+concept_vectors = pd.read_csv('./data/stored_vectors/scores_colors_hsv.csv')
+concept_vectors['vector'] = [np.array([float(xx) for xx in x]) for x in concept_vectors['vector'].str.split(', ')]
+concept_vectors['score'] = concept_vectors['score'].astype(float)
+
+concept_vectors = concept_vectors.sort_values('score', ascending=False).reset_index()
+
+with dnnlib.util.open_url('./data/textile_model_files/network-snapshot-005000.pkl') as f:
+    model = legacy.load_network_pkl(f)['G_ema'].to('cpu') # type: ignore
+
+COLORS_LIST = ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue', 'Blue', 'Purple', 'Pink', 'Saturation', 'Value']
+COLORS_NEGATIVE = COLORS_LIST + ['None']
+
+if 'image_id' not in st.session_state:
+    st.session_state.image_id = 52921
+if 'colors' not in st.session_state:
+    st.session_state.colors = [COLORS_LIST[5], COLORS_LIST[7]]
+if 'non_colors' not in st.session_state:
+    st.session_state.non_colors = ['None']
+if 'color_lambda' not in st.session_state:
+    st.session_state.color_lambda = [5]
+
+# ----------------------------- INPUT ----------------------------------
+epsilon_container = st.empty()
+st.header('Image Manipulation with Vector Algebra')
+
+header_col_1, header_col_2, header_col_3, header_col_4 = st.columns([1,1,1,1])
+input_col_1, output_col_2, output_col_3, input_col_4 = st.columns([1,1,1,1])
+
+# --------------------------- INPUT column 1 ---------------------------
+with input_col_1:
+    with st.form('image_form'):
+
+        # image_id = st.number_input('Image ID: ', format='%d', step=1)
+        st.write('**Choose or generate a random image**')
+        chosen_image_id_input = st.empty()
+        image_id = chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)
+
+        choose_image_button = st.form_submit_button('Choose the defined image')
+        random_id = st.form_submit_button('Generate a random image')
+
+        if random_id:
+            image_id = random.randint(0, 100000)
+            st.session_state.image_id = image_id
+            chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)
+
+        if choose_image_button:
+            image_id = int(image_id)
+            st.session_state.image_id = image_id
+
+with header_col_1:
+    st.write('### Input image selection')
+
+original_image_vec = annotations['w_vectors'][st.session_state.image_id]
+img = generate_original_image(original_image_vec, model)
+
+with output_col_2:
+    st.image(img)
+
+with header_col_2:
+    st.write('### Original image')
+
+with input_col_4:
+    with st.form('text_form_1'):
+
+        st.write('**Colors to vary (including Saturation and Value)**')
+        colors = st.multiselect('Color:', tuple(COLORS_LIST), default=[COLORS_LIST[5], COLORS_LIST[7]])
+        colors_button = st.form_submit_button('Choose the defined colors')
+
+        st.session_state.image_id = image_id
+        st.session_state.colors = colors
+        st.session_state.color_lambda = [5]*len(colors)
+        st.session_state.non_colors = ['None']*len(colors)
+
+        lambdas = []
+        negative_cols = []
+        for color in colors:
+            st.write('### '+color )
+            st.write('**Set range of change (can be negative)**')
+            chosen_color_lambda_input = st.empty()
+            color_lambda = chosen_color_lambda_input.number_input('Lambda:', min_value=-100, step=1, value=5, key=color+'_number')
+            lambdas.append(color_lambda)
+
+            st.write('**Set dimensions of change to not consider**')
+            chosen_color_negative_input = st.empty()
+            color_negative = chosen_color_negative_input.selectbox('Color:', tuple(COLORS_NEGATIVE), index=len(COLORS_NEGATIVE)-1, key=color+'_noncolor')
+            negative_cols.append(color_negative)
+
+        lambdas_button = st.form_submit_button('Submit options')
+        if lambdas_button:
+            st.session_state.color_lambda = lambdas
+            st.session_state.non_colors = negative_cols
+
+
+with header_col_4:
+    st.write('### Color settings')
+    # print(st.session_state.colors)
+    # print(st.session_state.color_lambda)
+    # print(st.session_state.non_colors)
+
+# ---------------------------- DISPLAY COL 1 ROW 1 ------------------------------
+
+with header_col_3:
+    separation_vectors = []
+    for col in st.session_state.colors:
+        separation_vector, score_1 = concept_vectors[concept_vectors['color'] == col].reset_index().loc[0, ['vector', 'score']]
+        separation_vectors.append(separation_vector)
+
+    negative_separation_vectors = []
+    for non_col in st.session_state.non_colors:
+        if non_col != 'None':
+            negative_separation_vector, score_2 = concept_vectors[concept_vectors['color'] == non_col].reset_index().loc[0, ['vector', 'score']]
+            negative_separation_vectors.append(negative_separation_vector)
+        else:
+            negative_separation_vectors.append('None')
+    ## n1 − (n1T n2)n2
+    # print(negative_separation_vectors, separation_vectors)
+    st.write('### Output Image')
+    st.write(f'''Change in colors: {str(st.session_state.colors)},\
+             without affecting colors {str(st.session_state.non_colors)}''')
+
+# ---------------------------- DISPLAY COL 2 ROW 1 ------------------------------
+
+with output_col_3:
+    image_updated = generate_composite_images(model, original_image_vec, separation_vectors,
+                                              lambdas=st.session_state.color_lambda,
+                                              negative_colors=negative_separation_vectors)
+    st.image(image_updated)
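A short note on why the projected direction used by this page, n1 - (n1.T n2) n2, leaves the protected color unchanged, assuming the boundary vectors are unit-norm: its component along n2 is (n1.T n2) - (n1.T n2)(n2.T n2) = 0, so stepping along the projected direction adds nothing in the n2 direction, and only the part of the edit aligned with n1 survives.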