ludusc committed
Commit 2cafca2
1 Parent(s): 731be70

lots of changes

.gitignore CHANGED
@@ -32,7 +32,10 @@ git-large-file
 deta_drive.py
 secret_keys.py
 
-data/old
+data/old/
+archive/
+figures/
+colors_test/
 # Large files
 # data/preprocessed_image_net/
 # data/activation/*.pkl
DisentanglementBase.py CHANGED
@@ -1,9 +1,11 @@
 import numpy as np
 import pandas as pd
 
 from sklearn.svm import SVC
 from sklearn.decomposition import PCA
-from sklearn.linear_model import LogisticRegression
 from sklearn.model_selection import train_test_split
 
 from tqdm import tqdm
@@ -27,8 +29,50 @@ sys.path.append('.')
 import dnnlib
 import legacy
 
 class DisentanglementBase:
-    def __init__(self, repo_folder, model, annotations, df, space, colors_list, compute_s):
         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
         print('Using device', self.device)
         self.repo_folder = repo_folder
@@ -36,6 +80,8 @@ class DisentanglementBase:
         self.annotations = annotations
         self.df = df
         self.space = space
 
         self.layers = ['input', 'L0_36_512', 'L1_36_512', 'L2_36_512', 'L3_52_512',
                        'L4_52_512', 'L5_84_512', 'L6_84_512', 'L7_148_512', 'L8_148_512',
@@ -49,7 +95,6 @@ class DisentanglementBase:
         if compute_s:
             self.get_s_space()
 
-
     def to_hsv(self):
         """
         The tohsv function takes the top 3 colors of each image and converts them to HSV values.
@@ -60,17 +105,30 @@ class DisentanglementBase:
         :doc-author: Trelent
         """
         print('Adding HSV encoding')
-        self.df['H1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[0])
-        self.df['H2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[0])
-        self.df['H3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[0])
 
-        self.df['S1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[1])
-        self.df['S2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[1])
-        self.df['S3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[1])
 
-        self.df['V1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[2])
-        self.df['V2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[2])
-        self.df['V3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'HSV')[2])
 
     def get_s_space(self):
         """
@@ -89,7 +147,7 @@ class DisentanglementBase:
             W = w_torch.expand((16, -1)).unsqueeze(0)
             s = []
             for i,layer in enumerate(self.layers):
-                s.append(getattr(self.model.synthesis, layer).affine(W[0, i].unsqueeze(0)).numpy())
 
             ss.append(s)
         self.annotations['s_vectors'] = ss
@@ -116,17 +174,32 @@ class DisentanglementBase:
         print('Shape embedding:', X.shape)
         return X
 
-    def get_train_val(self, var='H1', cat=True):
         X = self.get_encoded_latent()
-        y = np.array(self.df[var].values)
-        if cat:
             y_cat = pd.cut(y,
-                           bins=[x*256/12 if x<12 else 256 for x in range(13)],
-                           labels=self.colors_list
-                           ).fillna('Warm Pink Red')
             x_train, x_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2)
         else:
-            x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
         return x_train, x_val, y_train, y_val
 
     def InterFaceGAN_separation_vector(self, method='LR', C=0.1):
@@ -148,17 +221,23 @@ class DisentanglementBase:
         """
         x_train, x_val, y_train, y_val = self.get_train_val()
 
-        if method == 'SVM':
-            svc = SVC(gamma='auto', kernel='linear', random_state=0, C=C)
-            svc.fit(x_train, y_train)
-            print('Val performance SVM', np.round(svc.score(x_val, y_val), 2))
-            return svc.coef_ / np.linalg.norm(clf.coef_)
-        elif method == 'LR':
-            clf = LogisticRegression(random_state=0, C=C)
             clf.fit(x_train, y_train)
-            print('Val performance logistic regression', np.round(clf.score(x_val, y_val), 2))
             return clf.coef_ / np.linalg.norm(clf.coef_)
-
     def get_original_position_latent(self, positive_idxs, negative_idxs):
         # ... (existing code for get_original_pos)
         separation_vectors = []
@@ -327,7 +406,7 @@ class DisentanglementBase:
 
         return img
 
-    def generate_changes(self, seed, separation_vector, min_epsilon=-3, max_epsilon=3, count=5, savefig=True, feature=None, method=None):
         """
         The regenerate_images function takes a model, z, and decision_boundary as input. It then
         constructs an inverse rotation/translation matrix and passes it to the generator. The generator
@@ -348,14 +427,13 @@ class DisentanglementBase:
         lambdas = np.linspace(min_epsilon, max_epsilon, count)
         images = []
         # Generate images.
-        for _, lambd in enumerate(tqdm(lambdas)):
             if self.space.lower() == 's':
                 images.append(self.generate_flexible_images(seed, separation_vector=separation_vector, lambd=lambd))
             elif self.space.lower() in ['z', 'w']:
                 images.append(self.generate_images(seed, separation_vector=separation_vector, lambd=lambd))
 
         if savefig:
-            print('Generating image for color', feature)
             fig, axs = plt.subplots(1, len(images), figsize=(90,20))
             title = 'Disentanglement method: '+ method + ', on feature: ' + feature + ' on space: ' + self.space + ', image seed: ' + str(seed)
             name = '_'.join([method, feature, self.space, str(seed), str(lambdas[-1])])
@@ -365,42 +443,117 @@ class DisentanglementBase:
                 axs[i].imshow(image)
                 axs[i].set_title(np.round(lambd, 2))
             plt.tight_layout()
-            plt.savefig(join(self.repo_folder, 'figures', name+'.jpg'))
             plt.close()
 
         return images, lambdas
 
     def get_verification_score(self, separation_vector, feature_id, samples=10, lambd=1, savefig=False, feature=None, method=None):
         items = random.sample(range(100000), samples)
-        hue_low = feature_id * 256 / 12
-        hue_high = (feature_id + 1) * 256 / 12
 
-        matches = 0
 
-        for seed in tqdm(items):
-            images, lambdas = self.generate_changes(seed, separation_vector, min_epsilon=-lambd, max_epsilon=lambd, count=3, savefig=savefig, feature=feature, method=method)
-            try:
-                colors_negative = extract_color(images[0], 5, 1, None)
-                h0, s0, v0 = ImageColor.getcolor(colors_negative[0], 'HSV')
-
-                colors_orig = extract_color(images[1], 5, 1, None)
-                h1, s1, v1 = ImageColor.getcolor(colors_orig[0], 'HSV')
 
-                colors_positive = extract_color(images[2], 5, 1, None)
-                h2, s2, v2 = ImageColor.getcolor(colors_positive[0], 'HSV')
 
-                if h1 > hue_low and h1 < hue_high:
-                    samples -= 1
-                else:
-                    if (h0 > hue_low and h0 < hue_high) or (h2 > hue_low and h2 < hue_high):
-                        matches += 1
 
-            except Exception as e:
-                print(e)
 
-        return np.round(matches / samples, 2)
-
 
 def main():
     repo_folder = '.'
     annotations_file = join(repo_folder, 'data/textile_annotated_files/seeds0000-100000_S.pkl')
@@ -417,26 +570,56 @@ def main():
     colors_list = ['Red', 'Orange', 'Yellow', 'Yellow Green', 'Chartreuse Green',
                    'Kelly Green', 'Green Blue Seafoam', 'Cyan Blue',
                    'Warm Blue', 'Indigo', 'Purple Magenta', 'Magenta Pink']
 
     scores = []
-    kwargs = {'CL method':['LR', 'SVM'], 'C':[0.1, 1], 'sign':[True, False], 'num_factors':[1, 10, 20], 'cutout': [None], 'max_lambda':[18, 3], 'samples':50, 'lambda_verif':[10, 5, 3]}
 
-    for space in ['w', 'z', 's']:
         print('Launching experiment with space:', space)
-        disentanglemnet_exp = DisentanglementBase(repo_folder, model, annotations, df, space=space, colors_list=colors_list, compute_s=False)
 
-        for method in ['InterFaceGAN', 'StyleSpace', 'GANSpace']:
             if space != 's' and method == 'InterFaceGAN':
                 print('Now obtaining separation vector for using InterfaceGAN')
                 for met in kwargs['CL method']:
                     for c in kwargs['C']:
                         separation_vectors = disentanglemnet_exp.InterFaceGAN_separation_vector(method=met, C=c)
                         for i, color in enumerate(colors_list):
-                            print('Generating images with variations')
                            for s in range(30):
                                 seed = random.randint(0,100000)
                                 for eps in kwargs['max_lambda']:
-                                    disentanglemnet_exp.generate_changes(seed, separation_vectors[i], min_epsilon=-eps, max_epsilon=eps, savefig=True, feature=color, method=str(method) + '_' + str(met) + '_' + str(c))
 
                             print('Finally obtaining verification score')
                             for verif in kwargs['lambda_verif']:
@@ -446,7 +629,7 @@ def main():
                                 scores.append([space, method, color, score, 'classification method:' + met + ', regularization: ' + str(c) + ', verification lambda:' + str(verif), ', '.join(list(separation_vectors[i].astype(str)))])
                     score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
                     print(score_df)
-                    score_df.to_csv(join(repo_folder, 'data/scores.csv'))
 
 
             elif method == 'StyleSpace':
@@ -456,11 +639,11 @@ def main():
                 for cutout in kwargs['cutout']:
                     separation_vectors = disentanglemnet_exp.StyleSpace_separation_vector(sign=sign, num_factors=num_factors, cutout=cutout)
                     for i, color in enumerate(colors_list):
-                        print('Generating images with variations')
                         for s in range(30):
                             seed = random.randint(0,100000)
                             for eps in kwargs['max_lambda']:
-                                disentanglemnet_exp.generate_changes(seed, separation_vectors[i], min_epsilon=-eps, max_epsilon=eps, savefig=True, feature=color, method=method + '_' + str(num_factors) + '_' + str(cutout) + '_' + str(sign))
 
                         print('Finally obtaining verification score')
                         for verif in kwargs['lambda_verif']:
@@ -470,29 +653,28 @@ def main():
                             scores.append([space, method, color, score, 'using sign:' + str(sign) + ', number of factors: ' + str(num_factors) + ', using cutout: ' + str(cutout) + ', verification lambda:' + str(verif), ', '.join(list(separation_vectors[i].astype(str)))])
                 score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
                 print(score_df)
-                score_df.to_csv(join(repo_folder, 'data/scores.csv'))
 
             if space == 'w' and method == 'GANSpace':
                 print('Now obtaining separation vector for using GANSpace')
                 separation_vectors = disentanglemnet_exp.GANSpace_separation_vectors(100)
                 for s in range(30):
                     print('Generating images with variations')
                     seed = random.randint(0,100000)
                     for i in range(100):
                         for eps in kwargs['max_lambda']:
-                            disentanglemnet_exp.generate_changes(seed, separation_vectors[i], min_epsilon=-eps, max_epsilon=eps, savefig=True, feature=color, method=method)
 
                 score = None
-                scores.append([space, method, color, score, '100', ', '.join(list(separation_vectors[i].astype(str)))])
             else:
                 print('Skipping', method, 'on space', space)
                 continue
 
-
-
     score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
     print(score_df)
-    score_df.to_csv(join(repo_folder, 'data/scores.csv'))
 
 if __name__ == "__main__":
     main()
+#!/usr/bin/env python
+
 import numpy as np
 import pandas as pd
 
 from sklearn.svm import SVC
 from sklearn.decomposition import PCA
+from sklearn.linear_model import LogisticRegression, LinearRegression
 from sklearn.model_selection import train_test_split
 
 from tqdm import tqdm

 import dnnlib
 import legacy
 
+
+def hex2rgb(hex_value):
+    h = hex_value.strip("#")
+    rgb = tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
+    return rgb
+
+def rgb2hsv(r, g, b):
+    # Normalize R, G, B values
+    r, g, b = r / 255.0, g / 255.0, b / 255.0
+
+    # h, s, v = hue, saturation, value
+    max_rgb = max(r, g, b)
+    min_rgb = min(r, g, b)
+    difference = max_rgb-min_rgb
+
+    # if max_rgb and max_rgb are equal then h = 0
+    if max_rgb == min_rgb:
+        h = 0
+
+    # if max_rgb==r then h is computed as follows
+    elif max_rgb == r:
+        h = (60 * ((g - b) / difference) + 360) % 360
+
+    # if max_rgb==g then compute h as follows
+    elif max_rgb == g:
+        h = (60 * ((b - r) / difference) + 120) % 360
+
+    # if max_rgb=b then compute h
+    elif max_rgb == b:
+        h = (60 * ((r - g) / difference) + 240) % 360
+
+    # if max_rgb==zero then s=0
+    if max_rgb == 0:
+        s = 0
+    else:
+        s = (difference / max_rgb) * 100
+
+    # compute v
+    v = max_rgb * 100
+    # return rounded values of H, S and V
+    return tuple(map(round, (h, s, v)))
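A quick way to sanity-check the two helpers added above (a sketch, assuming hex2rgb and rgb2hsv are importable from this module) is to compare them against Python's built-in colorsys, rescaling its [0, 1] outputs to degrees and percentages:

import colorsys

hex_color = '#3a6ea5'                      # hypothetical swatch
r, g, b = hex2rgb(hex_color)               # (58, 110, 165)
h, s, v = rgb2hsv(r, g, b)                 # roughly (211, 65, 65)

h_ref, s_ref, v_ref = colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)
assert abs(h - h_ref * 360) <= 1 and abs(s - s_ref * 100) <= 1 and abs(v - v_ref * 100) <= 1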
+
 class DisentanglementBase:
+    def __init__(self, repo_folder, model, annotations, df, space, colors_list, compute_s=False, variable='H1', categorical=True):
         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
         print('Using device', self.device)
         self.repo_folder = repo_folder

         self.annotations = annotations
         self.df = df
         self.space = space
+        self.categorical = categorical
+        self.variable = variable
 
         self.layers = ['input', 'L0_36_512', 'L1_36_512', 'L2_36_512', 'L3_52_512',
                        'L4_52_512', 'L5_84_512', 'L6_84_512', 'L7_148_512', 'L8_148_512',

         if compute_s:
             self.get_s_space()
 
     def to_hsv(self):
         """
         The tohsv function takes the top 3 colors of each image and converts them to HSV values.

         :doc-author: Trelent
         """
         print('Adding HSV encoding')
+        self.df['H1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
+        self.df['H2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
+        self.df['H3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
+
+        self.df['S1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
+        self.df['S2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
+        self.df['S3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
+
+        self.df['V1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
+        self.df['V2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
+        self.df['V3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
 
+        print('Adding RGB encoding')
+        self.df['R1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
+        self.df['R2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
+        self.df['R3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
 
+        self.df['G1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
+        self.df['G2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
+        self.df['G3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
+
+        self.df['B1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
+        self.df['B2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
+        self.df['B3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
 
     def get_s_space(self):
         """

             W = w_torch.expand((16, -1)).unsqueeze(0)
             s = []
             for i,layer in enumerate(self.layers):
+                s.append(getattr(self.model.synthesis, layer).affine(W[0, i].unsqueeze(0)).cpu().numpy())
 
             ss.append(s)
         self.annotations['s_vectors'] = ss

         print('Shape embedding:', X.shape)
         return X
 
+    def get_train_val(self, extremes=False):
         X = self.get_encoded_latent()
+        y = np.array(self.df[self.variable].values)
+        if self.categorical:
+            bins = [(x-1) * 360 / (len(self.colors_list) - 1) if x != 1
+                    else 1 for x in range(len(self.colors_list) + 1)]
+            bins[0] = 0
             y_cat = pd.cut(y,
+                           bins=bins,
+                           labels=self.colors_list,
+                           include_lowest=True
+                           )
+            print(y_cat.value_counts())
             x_train, x_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2)
         else:
+            if extremes:
+                # Calculate the number of elements to consider (10% of array size)
+                num_elements = int(0.2 * len(y))
+                # Get indices of the top num_elements maximum values
+                top_indices = np.argpartition(array, -num_elements)[-num_elements:]
+                bottom_indices = np.argpartition(array, -num_elements)[:num_elements]
+                y_ext = y[top_indices + bottom_indices, :]
+                X_ext = X[top_indices + bottom_indices, :]
+                x_train, x_val, y_train, y_val = train_test_split(X_ext, y_ext, test_size=0.2)
+            else:
+                x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
         return x_train, x_val, y_train, y_val
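To see what the categorical hue binning above produces, here is a small illustration (a sketch; the eight-colour list mirrors the one set in main() further down, and the sample hue values are made up):

import numpy as np
import pandas as pd

colors_list = ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue',
               'Blue', 'Purple', 'Pink']

# Same bin construction as get_train_val: a degenerate [0, 1] band for 'Gray'
# (rgb2hsv returns h == 0 for grey pixels), then 7 equal bands over 1..360 degrees.
bins = [(x - 1) * 360 / (len(colors_list) - 1) if x != 1 else 1
        for x in range(len(colors_list) + 1)]
bins[0] = 0

hues = np.array([0, 15, 75, 130, 190, 230, 280, 340])
labels = pd.cut(hues, bins=bins, labels=colors_list, include_lowest=True)
print(list(labels))  # ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue', 'Blue', 'Purple', 'Pink']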
 
     def InterFaceGAN_separation_vector(self, method='LR', C=0.1):

         """
         x_train, x_val, y_train, y_val = self.get_train_val()
 
+        if self.categorical:
+            if method == 'SVM':
+                svc = SVC(gamma='auto', kernel='linear', random_state=0, C=C)
+                svc.fit(x_train, y_train)
+                print('Val performance SVM', np.round(svc.score(x_val, y_val), 2))
+                return svc.coef_ / np.linalg.norm(svc.coef_)
+            elif method == 'LR':
+                clf = LogisticRegression(random_state=0, C=C)
+                clf.fit(x_train, y_train)
+                print('Val performance logistic regression', np.round(clf.score(x_val, y_val), 2))
+                return clf.coef_ / np.linalg.norm(clf.coef_)
+        else:
+            clf = LinearRegression()
             clf.fit(x_train, y_train)
+            print('Val performance linear regression', np.round(clf.score(x_val, y_val), 2))
             return clf.coef_ / np.linalg.norm(clf.coef_)
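Downstream, generate_changes shifts a latent along such a normalised coefficient vector; a minimal sketch of that use (shapes are assumptions for a 512-dimensional W space, with synthetic data standing in for a fitted classifier):

import numpy as np

rng = np.random.default_rng(0)
w = rng.standard_normal(512)              # stand-in for a sampled w latent
coef = rng.standard_normal((1, 512))      # stand-in for clf.coef_ from the method above
direction = coef / np.linalg.norm(coef)   # unit-norm separation vector

for lambd in np.linspace(-3, 3, 5):       # the epsilon sweep used by generate_changes
    w_edit = w + lambd * direction[0]     # edited latent that would be fed to the generator
    print(round(lambd, 2), round(float(np.linalg.norm(w_edit - w)), 2))  # displacement equals |lambda|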
+
     def get_original_position_latent(self, positive_idxs, negative_idxs):
         # ... (existing code for get_original_pos)
         separation_vectors = []

 
         return img
 
+    def generate_changes(self, seed, separation_vector, min_epsilon=-3, max_epsilon=3, count=5, savefig=True, feature=None, method=None, save_separately=False):
         """
         The regenerate_images function takes a model, z, and decision_boundary as input. It then
         constructs an inverse rotation/translation matrix and passes it to the generator. The generator

         lambdas = np.linspace(min_epsilon, max_epsilon, count)
         images = []
         # Generate images.
+        for _, lambd in enumerate(lambdas):
             if self.space.lower() == 's':
                 images.append(self.generate_flexible_images(seed, separation_vector=separation_vector, lambd=lambd))
             elif self.space.lower() in ['z', 'w']:
                 images.append(self.generate_images(seed, separation_vector=separation_vector, lambd=lambd))
 
         if savefig:
             fig, axs = plt.subplots(1, len(images), figsize=(90,20))
             title = 'Disentanglement method: '+ method + ', on feature: ' + feature + ' on space: ' + self.space + ', image seed: ' + str(seed)
             name = '_'.join([method, feature, self.space, str(seed), str(lambdas[-1])])

                 axs[i].imshow(image)
                 axs[i].set_title(np.round(lambd, 2))
             plt.tight_layout()
+            plt.savefig(join(self.repo_folder, 'figures', 'examples', name+'.jpg'))
             plt.close()
+
+        if save_separately:
+            for i, (image, lambd) in enumerate(zip(images, lambdas)):
+                plt.imshow(image)
+                plt.tight_layout()
+                plt.savefig(join(self.repo_folder, 'figures', 'examples', name + '_' + str(lambd) + '.jpg'))
+                plt.close()
+
         return images, lambdas
 
     def get_verification_score(self, separation_vector, feature_id, samples=10, lambd=1, savefig=False, feature=None, method=None):
         items = random.sample(range(100000), samples)
+        if self.categorical:
+            if feature_id == 0:
+                hue_low = 0
+                hue_high = 1
+            elif feature_id == 1:
+                hue_low = 1
+                hue_high = (feature_id - 1) * 360 / (len(self.colors_list) - 1)
+            else:
+                hue_low = (feature_id - 1) * 360 / (len(self.colors_list) - 1)
+                hue_high = feature_id * 360 / (len(self.colors_list) - 1)
 
+            matches = 0
 
+            for seed in tqdm(items):
+                images, lambdas = self.generate_changes(seed, separation_vector, min_epsilon=-lambd, max_epsilon=lambd, count=3, savefig=savefig, feature=feature, method=method)
+                try:
+                    colors_negative = extract_color(images[0], 5, 1, None)
+                    h0, s0, v0 = rgb2hsv(*hex2rgb(colors_negative[0]))
 
+                    colors_orig = extract_color(images[1], 5, 1, None)
+                    h1, s1, v1 = rgb2hsv(*hex2rgb(colors_orig[0]))
+
+                    colors_positive = extract_color(images[2], 5, 1, None)
+                    h2, s2, v2 = rgb2hsv(*hex2rgb(colors_positive[0]))
+
+                    if h1 > hue_low and h1 < hue_high:
+                        samples -= 1
+                    else:
+                        if (h0 > hue_low and h0 < hue_high) or (h2 > hue_low and h2 < hue_high):
+                            matches += 1
 
+                except Exception as e:
+                    print(e)
+
+            return np.round(matches / samples, 2)
 
+        else:
+            increase = 0
 
+            for seed in tqdm(items):
+                images, lambdas = self.generate_changes(seed, separation_vector, min_epsilon=-lambd,
+                                                        max_epsilon=lambd, count=3, savefig=savefig,
+                                                        feature=feature, method=method)
+                try:
+                    colors_negative = extract_color(images[0], 5, 1, None)
+                    r0, g0, b0 = hex2rgb(colors_negative[0])
+                    h0, s0, v0 = rgb2hsv(*hex2rgb(colors_negative[0]))
 
+                    colors_orig = extract_color(images[1], 5, 1, None)
+                    r1, g1, b1 = hex2rgb(colors_orig[0])
+                    h1, s1, v1 = rgb2hsv(*hex2rgb(colors_orig[0]))
+
+                    colors_positive = extract_color(images[2], 5, 1, None)
+                    r2, g2, b2 = hex2rgb(colors_positive[0])
+                    h2, s2, v2 = rgb2hsv(*hex2rgb(colors_positive[0]))
+
+                    if 's' in self.variable.lower():
+                        increase += max(0, s2 - s1)
+                    elif 'v' in self.variable.lower():
+                        increase += max(0, v2 - v1)
+                    elif 'r' in self.variable.lower():
+                        increase += max(0, r2 - r1)
+                    elif 'g' in self.variable.lower():
+                        increase += max(0, g2 - g1)
+                    elif 'b' in self.variable.lower():
+                        increase += max(0, b2 - b1)
+                    else:
+                        raise('Continous variable not allowed, choose between RGB or SV')
+                except Exception as e:
+                    print(e)
+
+            return np.round(increase / samples, 2)
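A toy walk-through of the categorical verification logic above (a sketch with made-up hue triplets; a real run extracts them from generated images via extract_color):

import numpy as np

n_colors = 8
feature_id = 3                                   # hypothetical target colour index
hue_low = (feature_id - 1) * 360 / (n_colors - 1)
hue_high = feature_id * 360 / (n_colors - 1)     # band roughly (102.9, 154.3] degrees

observed = [(40.0, 90.0, 130.0),                 # (h_negative, h_original, h_positive) per seed
            (120.0, 118.0, 119.0),
            (200.0, 60.0, 140.0)]
samples, matches = len(observed), 0
for h0, h1, h2 in observed:
    if hue_low < h1 < hue_high:                  # original already in the target band: discard
        samples -= 1
    elif hue_low < h0 < hue_high or hue_low < h2 < hue_high:
        matches += 1
print(np.round(matches / samples, 2))            # 1.0 for this toy data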
+
 
+def continous_experiment(name, var, repo_folder, model, annotations, df, space, colors_list, kwargs):
+    scores = []
+    print(f'Launching {name} experiment')
+    disentanglemnet_exp = DisentanglementBase(repo_folder, model, annotations, df, space=space, colors_list=colors_list, compute_s=False, variable=var, categorical=False)
+    for extr in kwargs['extremes']:
+        separation_vector = disentanglemnet_exp.InterFaceGAN_separation_vector()
+        print(f'Generating images with variations for {name}')
+        for s in range(30):
+            seed = random.randint(0,100000)
+            for eps in kwargs['max_lambda']:
+                disentanglemnet_exp.generate_changes(seed, separation_vector, min_epsilon=-eps, max_epsilon=eps, savefig=True, feature=name, method= 'InterFaceGAN_' + str(extr))
+
+        print('Finally obtaining verification score')
+        for verif in kwargs['lambda_verif']:
+            score = disentanglemnet_exp.get_verification_score(separation_vector, 0, samples=kwargs['samples'], lambd=verif, savefig=False, feature=name, method='InterFaceGAN_' + str(extr))
+            print(f'Score for method InterfaceGAN on {name}:', score)
+
+            scores.append([space, 'InterFaceGAN', name, score, 'extremes method:' + str(extr) + 'verification lambda:' + str(verif), ', '.join(list(separation_vector.astype(str)))])
+
+    score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
+    print(score_df)
+    score_df.to_csv(join(repo_folder, f'data/scores_{name}.csv'))
+
 def main():
     repo_folder = '.'
     annotations_file = join(repo_folder, 'data/textile_annotated_files/seeds0000-100000_S.pkl')

     colors_list = ['Red', 'Orange', 'Yellow', 'Yellow Green', 'Chartreuse Green',
                    'Kelly Green', 'Green Blue Seafoam', 'Cyan Blue',
                    'Warm Blue', 'Indigo', 'Purple Magenta', 'Magenta Pink']
+    colors_list = ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue',
+                   'Blue', 'Purple', 'Pink']
 
     scores = []
+    kwargs = {'CL method':['LR', 'SVM'], 'C':[0.1, 1], 'sign':[True, False],
+              'num_factors':[1, 5, 10, 20], 'cutout': [None], 'max_lambda':[18, 6],
+              'samples':30, 'lambda_verif':[14, 7], 'extremes':[True, False]}
+    continuous = False
+    specific_examples = [53139, 99376, 16, 99585, 40851, 70, 17703, 44, 52628,
+                         99884, 52921, 46180, 19995, 40920, 554]
 
+    if specific_examples is not None:
+        disentanglemnet_exp = DisentanglementBase(repo_folder, model, annotations, df, space='w', colors_list=colors_list, compute_s=False)
+
+        separation_vectors = disentanglemnet_exp.StyleSpace_separation_vector(sign=True, num_factors=10, cutout=None)
+        # separation_vectors = disentanglemnet_exp.InterFaceGAN_separation_vector(method='LR', C=0.1)
+        for specific_example in specific_examples:
+            seed = specific_example
+            for i, color in enumerate(colors_list):
+                disentanglemnet_exp.generate_changes(seed, separation_vectors[i], min_epsilon=-9, max_epsilon=9, savefig=True, save_separately=True, feature=color, method='StyleSpace' + '_' + str(True) + '_' + str(10) + '_' + str(None))
+
+        return
+
+    for space in ['w', ]: #'z', 's'
         print('Launching experiment with space:', space)
+
+        if continuous:
+            continous_experiment('Saturation', 'S1', repo_folder, model, annotations, df, space, colors_list, kwargs)
+            continous_experiment('Value', 'V1', repo_folder, model, annotations, df, space, colors_list, kwargs)
+            continous_experiment('Red', 'R1', repo_folder, model, annotations, df, space, colors_list, kwargs)
+            continous_experiment('Green', 'G1', repo_folder, model, annotations, df, space, colors_list, kwargs)
+            continous_experiment('Blue', 'B1', repo_folder, model, annotations, df, space, colors_list, kwargs)
+            break
+
+        print('Launching Hue experiment')
+        variable = 'H1'
+        disentanglemnet_exp = DisentanglementBase(repo_folder, model, annotations, df, space=space, colors_list=colors_list, compute_s=False, variable=variable)
 
+        for method in ['StyleSpace', 'InterFaceGAN',]: #'GANSpace'
             if space != 's' and method == 'InterFaceGAN':
                 print('Now obtaining separation vector for using InterfaceGAN')
                 for met in kwargs['CL method']:
                     for c in kwargs['C']:
                         separation_vectors = disentanglemnet_exp.InterFaceGAN_separation_vector(method=met, C=c)
                         for i, color in enumerate(colors_list):
+                            print(f'Generating images with variations for color {color}')
                             for s in range(30):
                                 seed = random.randint(0,100000)
                                 for eps in kwargs['max_lambda']:
+                                    disentanglemnet_exp.generate_changes(seed, separation_vectors[i], min_epsilon=-eps, max_epsilon=eps, savefig=True, feature=color, method=str(method) + '_' + str(met) + '_' + str(c) + '_' + str(len(colors_list)) + '_' + str(variable))
 
                             print('Finally obtaining verification score')
                             for verif in kwargs['lambda_verif']:

                                 scores.append([space, method, color, score, 'classification method:' + met + ', regularization: ' + str(c) + ', verification lambda:' + str(verif), ', '.join(list(separation_vectors[i].astype(str)))])
                     score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
                     print(score_df)
+                    score_df.to_csv(join(repo_folder, f'data/scores_InterfaceGAN_{variable}_{len(colors_list)}.csv'))
 
 
             elif method == 'StyleSpace':

                 for cutout in kwargs['cutout']:
                     separation_vectors = disentanglemnet_exp.StyleSpace_separation_vector(sign=sign, num_factors=num_factors, cutout=cutout)
                     for i, color in enumerate(colors_list):
+                        print(f'Generating images with variations for color {color}')
                         for s in range(30):
                             seed = random.randint(0,100000)
                             for eps in kwargs['max_lambda']:
+                                disentanglemnet_exp.generate_changes(seed, separation_vectors[i], min_epsilon=-eps, max_epsilon=eps, savefig=True, feature=color, method=method + '_' + str(num_factors) + '_' + str(cutout) + '_' + str(sign) + '_' + str(len(colors_list)) + '_' + str(variable))
 
                         print('Finally obtaining verification score')
                         for verif in kwargs['lambda_verif']:

                             scores.append([space, method, color, score, 'using sign:' + str(sign) + ', number of factors: ' + str(num_factors) + ', using cutout: ' + str(cutout) + ', verification lambda:' + str(verif), ', '.join(list(separation_vectors[i].astype(str)))])
                 score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
                 print(score_df)
+                score_df.to_csv(join(repo_folder, f'data/scores_StyleSpace_{variable}_{len(colors_list)}.csv'))
 
             if space == 'w' and method == 'GANSpace':
                 print('Now obtaining separation vector for using GANSpace')
                 separation_vectors = disentanglemnet_exp.GANSpace_separation_vectors(100)
+                print(separation_vectors.shape)
                 for s in range(30):
                     print('Generating images with variations')
                     seed = random.randint(0,100000)
                     for i in range(100):
                         for eps in kwargs['max_lambda']:
+                            disentanglemnet_exp.generate_changes(seed, separation_vectors.T[i], min_epsilon=-eps, max_epsilon=eps, savefig=True, feature='dimension_' + str(i), method=method)
 
                 score = None
+                scores.append([space, method, 'PCA', score, '100', ', '.join(list(separation_vectors.T[i].astype(str)))])
             else:
                 print('Skipping', method, 'on space', space)
                 continue
 
     score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
     print(score_df)
+    score_df.to_csv(join(repo_folder, 'data/scores_{}.csv'.format(pd.to_datetime.now().strftime("%Y-%m-%d_%H%M%S"))))
 
 if __name__ == "__main__":
     main()
check_images.py ADDED
@@ -0,0 +1,256 @@
+#!/usr/bin/env python
+
+import numpy as np
+import pandas as pd
+
+from sklearn.svm import SVC
+from sklearn.decomposition import PCA
+from sklearn.linear_model import LogisticRegression, LinearRegression
+from sklearn.model_selection import train_test_split
+
+from tqdm import tqdm
+import random
+from os.path import join
+import os
+import pickle
+
+import torch
+
+import matplotlib.pyplot as plt
+import PIL
+from PIL import Image, ImageColor
+
+import sys
+sys.path.append('backend')
+from color_annotations import extract_color
+from networks_stylegan3 import *
+sys.path.append('.')
+
+import dnnlib
+import legacy
+
+def hex2rgb(hex_value):
+    h = hex_value.strip("#")
+    rgb = tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
+    return rgb
+
+def rgb2hsv(r, g, b):
+    # Normalize R, G, B values
+    r, g, b = r / 255.0, g / 255.0, b / 255.0
+
+    # h, s, v = hue, saturation, value
+    max_rgb = max(r, g, b)
+    min_rgb = min(r, g, b)
+    difference = max_rgb-min_rgb
+
+    # if max_rgb and max_rgb are equal then h = 0
+    if max_rgb == min_rgb:
+        h = 0
+
+    # if max_rgb==r then h is computed as follows
+    elif max_rgb == r:
+        h = (60 * ((g - b) / difference) + 360) % 360
+
+    # if max_rgb==g then compute h as follows
+    elif max_rgb == g:
+        h = (60 * ((b - r) / difference) + 120) % 360
+
+    # if max_rgb=b then compute h
+    elif max_rgb == b:
+        h = (60 * ((r - g) / difference) + 240) % 360
+
+    # if max_rgb==zero then s=0
+    if max_rgb == 0:
+        s = 0
+    else:
+        s = (difference / max_rgb) * 100
+
+    # compute v
+    v = max_rgb * 100
+    # return rounded values of H, S and V
+    return tuple(map(round, (h, s, v)))
+
+
+class DisentanglementBase:
+    def __init__(self, repo_folder, model, annotations, df, space, colors_list, compute_s=False, variable='H1', categorical=True):
+        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        print('Using device', self.device)
+        self.repo_folder = repo_folder
+        self.model = model.to(self.device)
+        self.annotations = annotations
+        self.df = df
+        self.space = space
+        self.categorical = categorical
+        self.variable = variable
+
+        self.layers = ['input', 'L0_36_512', 'L1_36_512', 'L2_36_512', 'L3_52_512',
+                       'L4_52_512', 'L5_84_512', 'L6_84_512', 'L7_148_512', 'L8_148_512',
+                       'L9_148_362', 'L10_276_256', 'L11_276_181', 'L12_276_128',
+                       'L13_256_128', 'L14_256_3']
+        self.layers_shapes = [4, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 362, 256, 181, 128, 128]
+        self.decoding_layers = 16
+        self.colors_list = colors_list
+
+        self.to_hsv()
+        if compute_s:
+            self.get_s_space()
+
+
+    def to_hsv(self):
+        """
+        The tohsv function takes the top 3 colors of each image and converts them to HSV values.
+        It then adds these values as new columns in the dataframe.
+
+        :param self: Allow the function to access the dataframe
+        :return: The dataframe with the new columns added
+        :doc-author: Trelent
+        """
+        print('Adding HSV encoding')
+        self.df['H1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
+        self.df['H2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
+        self.df['H3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
+
+        self.df['S1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
+        self.df['S2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
+        self.df['S3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
+
+        self.df['V1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
+        self.df['V2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
+        self.df['V3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
+
+        print('Adding RGB encoding')
+        self.df['R1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
+        self.df['R2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
+        self.df['R3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
+
+        self.df['G1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
+        self.df['G2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
+        self.df['G3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
+
+        self.df['B1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
+        self.df['B2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
+        self.df['B3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
+        return self.df
+
+    def get_encoded_latent(self):
+        # ... (existing code for getX)
+        if self.space.lower() == 'w':
+            X = np.array(self.annotations['w_vectors']).reshape((len(self.annotations['w_vectors']), 512))
+        elif self.space.lower() == 'z':
+            X = np.array(self.annotations['z_vectors']).reshape((len(self.annotations['z_vectors']), 512))
+        elif self.space.lower() == 's':
+            concat_v = []
+            for i in range(len(self.annotations['w_vectors'])):
+                concat_v.append(np.concatenate(self.annotations['s_vectors'][i], axis=1))
+            X = np.array(concat_v)
+            X = X[:, 0, :]
+        else:
+            Exception("Sorry, option not available, select among Z, W, S")
+
+        print('Shape embedding:', X.shape)
+        return X
+
+    def get_train_val(self, extremes=False):
+        X = self.get_encoded_latent()
+        y = np.array(self.df[self.variable].values)
+        if self.categorical:
+            y_cat = pd.cut(y,
+                           bins=[x * 360 / len(self.colors_list) if x < len(self.colors_list)
+                                 else 360 for x in range(len(self.colors_list) + 1)],
+                           labels=self.colors_list
+                           ).fillna(self.colors_list[0])
+            x_train, x_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2)
+        else:
+            if extremes:
+                # Calculate the number of elements to consider (10% of array size)
+                num_elements = int(0.2 * len(y))
+                # Get indices of the top num_elements maximum values
+                top_indices = np.argpartition(array, -num_elements)[-num_elements:]
+                bottom_indices = np.argpartition(array, -num_elements)[:num_elements]
+                y_ext = y[top_indices + bottom_indices, :]
+                X_ext = X[top_indices + bottom_indices, :]
+                x_train, x_val, y_train, y_val = train_test_split(X_ext, y_ext, test_size=0.2)
+            else:
+                x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
+        return x_train, x_val, y_train, y_val
+
+    def generate_orig_image(self, vec, seed=False):
+        """
+        The generate_original_image function takes in a latent vector and the model,
+        and returns an image generated from that latent vector.
+
+
+        :param z: Generate the image
+        :param model: Generate the image
+        :return: A pil image
+        :doc-author: Trelent
+        """
+        G = self.model.to(self.device) # type: ignore
+        # Labels.
+        label = torch.zeros([1, G.c_dim], device=self.device)
+        if seed:
+            seed = vec
+            vec = self.annotations['z_vectors'][seed]
+
+        Z = torch.from_numpy(vec.copy()).to(self.device)
+        img = G(Z, label, truncation_psi=1, noise_mode='const')
+        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
+        img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
+        return img
+ return img
200
+
201
+ def main():
202
+ repo_folder = '.'
203
+ annotations_file = join(repo_folder, 'data/textile_annotated_files/seeds0000-100000_S.pkl')
204
+ with open(annotations_file, 'rb') as f:
205
+ annotations = pickle.load(f)
206
+
207
+ df_file = join(repo_folder, 'data/textile_annotated_files/top_three_colours.csv')
208
+ df = pd.read_csv(df_file).fillna('#000000')
209
+
210
+ model_file = join(repo_folder, 'data/textile_model_files/network-snapshot-005000.pkl')
211
+ with dnnlib.util.open_url(model_file) as f:
212
+ model = legacy.load_network_pkl(f)['G_ema'] # type: ignore
213
+
214
+ colors_list = ['Red', 'Orange', 'Yellow', 'Yellow Green', 'Chartreuse Green',
215
+ 'Kelly Green', 'Green Blue Seafoam', 'Cyan Blue',
216
+ 'Warm Blue', 'Indigo', 'Purple Magenta', 'Magenta Pink']
217
+ colors_list = ['Red Orange', 'Yellow', 'Green', 'Light Blue',
218
+ 'Blue', 'Purple', 'Pink']
219
+
220
+
221
+ disentanglemnet_exp = DisentanglementBase(repo_folder, model, annotations, df, space='w', colors_list=colors_list)
222
+ # x_train, x_val, y_train, y_val = disentanglemnet_exp.get_train_val()
223
+ # print(colors_list)
224
+ # print(np.unique(y_train, return_counts=True))
225
+
226
+
227
+ # for i, color in enumerate(colors_list):
228
+ # idxs = np.where(y_train == color)
229
+ # x_color = x_train[idxs][:30, :]
230
+ # print(x_color.shape)
231
+ # print('Generating images of color ' + color)
232
+ # for j, vec in enumerate(x_color):
233
+ # vec = np.expand_dims(vec, axis=0)
234
+ # img = disentanglemnet_exp.generate_orig_image(vec)
235
+ # img.save(f'{repo_folder}/colors_test/color_{color}_{j}.png')
236
+
237
+ df = disentanglemnet_exp.to_hsv()
238
+ df['color'] = pd.cut(df['H1'],
239
+ bins=[x * 360 / len(colors_list) if x < len(colors_list)
240
+ else 360 for x in range(len(colors_list) + 1)],
241
+ labels=colors_list
242
+ ).fillna(colors_list[0])
243
+
244
+ print(df['color'].value_counts())
245
+ df['seed'] = df['fname'].str.split('/').apply(lambda x: x[-1]).str.replace('seed', '').str.replace('.png','').astype(int)
246
+ print(df[df['seed'] == 3][['H1', 'S1', 'V1', 'R1', 'B1', 'G1']])
247
+ for i, color in enumerate(colors_list):
248
+ idxs = df['color'] == color
249
+ x_color = df['seed'][idxs][:30]
250
+ print('Generating images of color ' + color)
251
+ for j, vec in enumerate(x_color):
252
+ img = disentanglemnet_exp.generate_orig_image(int(vec), seed=True)
253
+ img.save(f'{repo_folder}/colors_test/color_{color}_{j}corrected.png')
254
+
255
+ if __name__ == "__main__":
256
+ main()
data/scores_Blue.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a54439734a2f8f107f6236ad8732ab049639e4d565617cc7f3d89e79d9c29428
+size 27620
data/scores_Green.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b276b23fb5c1abb8226e1e0790f7c77509950f1eb2443cab71f154267a4c7c83
+size 27491
data/scores_InterfaceGAN_H1_8.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f9b22fba3e1a4dabf3ab59342536c707f9c55b04c257d1da49c6a6be9bac082
+size 919823
data/scores_Red.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ae0e9ee9a907881b19542eb19cd98947b9f3d2a3ccca61f6dd25823a3fb8e82
+size 27619
data/scores_Saturation.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac053d8ed3f6514f4ac7b3c4a279aac2889bee02c68acc7d4ad45ccb88bf84c3
+size 27564
data/scores_StyleSpace_H1_8.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bfc5681aa827b2be07cb0ce00eefd6464e2f0836216b884858b5866ffb8aa80
+size 360571
data/scores_Value.csv ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b9251b5089425cbf640894e576ce648011b81b2e2a6a74b35e898984a595efa
+size 27516
data/textile_annotated_files/seeds0000-100000_S.pkl CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8763836ea1142f6f2e3d36b7fe92bcf9a4549e9ef8e0a83a02b4772d64e95d54
+oid sha256:88dffd2abe21053c375420a6babcb12e93f2925ccdd192a09e51ab917f9ab0f3
 size 3178623075
test_disentanglement.sh ADDED
@@ -0,0 +1,12 @@
+#!/bin/bash
+#SBATCH --time=1-00:00:00
+#SBATCH --mem=32GB
+#SBATCH --gres gpu:1
+
+module load v100
+module load cuda
+module load mamba
+source activate test
+
+python DisentanglementBase.py
+conda deactivate
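A usage note (an assumption, not stated in the commit): on a SLURM cluster this job script would normally be submitted with sbatch test_disentanglement.sh; the module load lines presuppose that v100, cuda and mamba modules are available on the cluster, and source activate test presupposes an existing conda environment named test.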