#!/usr/bin/env python
import os
import pickle
import random
import sys
from os.path import join

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import PIL
import torch
from PIL import Image, ImageColor
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from tqdm import tqdm

sys.path.append('backend')
from color_annotations import extract_color
from networks_stylegan3 import *

sys.path.append('.')
import dnnlib
import legacy
def hex2rgb(hex_value):
    """Convert a '#rrggbb' hex string to an (R, G, B) tuple of ints."""
    h = hex_value.strip("#")
    rgb = tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))
    return rgb
def rgb2hsv(r, g, b):
    """Convert 8-bit R, G, B values to (H, S, V) with H in degrees and S, V in percent."""
    # Normalize R, G, B values
    r, g, b = r / 255.0, g / 255.0, b / 255.0

    # h, s, v = hue, saturation, value
    max_rgb = max(r, g, b)
    min_rgb = min(r, g, b)
    difference = max_rgb - min_rgb

    # if max_rgb and min_rgb are equal then h = 0
    if max_rgb == min_rgb:
        h = 0
    # if max_rgb == r then h is computed as follows
    elif max_rgb == r:
        h = (60 * ((g - b) / difference) + 360) % 360
    # if max_rgb == g then compute h as follows
    elif max_rgb == g:
        h = (60 * ((b - r) / difference) + 120) % 360
    # if max_rgb == b then compute h
    elif max_rgb == b:
        h = (60 * ((r - g) / difference) + 240) % 360

    # if max_rgb is zero then s = 0
    if max_rgb == 0:
        s = 0
    else:
        s = (difference / max_rgb) * 100

    # compute v
    v = max_rgb * 100
    # return rounded values of H, S and V
    return tuple(map(round, (h, s, v)))
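
# Usage sketch (hand-checked example, not from the original file): a pure orange
# '#ff8000' maps to roughly 30 degrees of hue at full saturation and value.
# >>> hex2rgb('#ff8000')
# (255, 128, 0)
# >>> rgb2hsv(255, 128, 0)
# (30, 100, 100)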
class DisentanglementBase:
    def __init__(self, repo_folder, model, annotations, df, space, colors_list,
                 compute_s=False, variable='H1', categorical=True):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print('Using device', self.device)
        self.repo_folder = repo_folder
        self.model = model.to(self.device)
        self.annotations = annotations
        self.df = df
        self.space = space
        self.categorical = categorical
        self.variable = variable
        self.layers = ['input', 'L0_36_512', 'L1_36_512', 'L2_36_512', 'L3_52_512',
                       'L4_52_512', 'L5_84_512', 'L6_84_512', 'L7_148_512', 'L8_148_512',
                       'L9_148_362', 'L10_276_256', 'L11_276_181', 'L12_276_128',
                       'L13_256_128', 'L14_256_3']
        self.layers_shapes = [4, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 362, 256, 181, 128, 128]
        self.decoding_layers = 16
        self.colors_list = colors_list

        self.to_hsv()
        if compute_s:
            self.get_s_space()
    def to_hsv(self):
        """
        The to_hsv function takes the top 3 colors of each image and converts them to HSV values.
        It then adds these values as new columns in the dataframe.

        :param self: Allow the function to access the dataframe
        :return: The dataframe with the new columns added
        :doc-author: Trelent
        """
        print('Adding HSV encoding')
        for i, col in enumerate(['top1col', 'top2col', 'top3col'], start=1):
            self.df[f'H{i}'] = self.df[col].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
            self.df[f'S{i}'] = self.df[col].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
            self.df[f'V{i}'] = self.df[col].map(lambda x: rgb2hsv(*hex2rgb(x))[2])

        print('Adding RGB encoding')
        for i, col in enumerate(['top1col', 'top2col', 'top3col'], start=1):
            self.df[f'R{i}'] = self.df[col].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
            self.df[f'G{i}'] = self.df[col].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
            self.df[f'B{i}'] = self.df[col].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
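
        # Input sketch (assumed shape of the annotation dataframe, not shown in the file):
        # each colour column holds one hex string per image, e.g.
        #   pd.DataFrame({'top1col': ['#ff0000'], 'top2col': ['#00ff00'], 'top3col': ['#0000ff']})
        # after to_hsv() the row gains H1..V3 and R1..B3, e.g. H1 == 0 and R1 == 255 for '#ff0000'.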
    def get_s_space(self):
        """
        The get_s_space function takes the w_vectors from the annotations dictionary and uses them to generate s_vectors.
        The s_space is a space of vectors that are generated by passing each w vector through each layer of the model.
        This allows us to see how much information about a particular class is contained in different layers.

        :param self: Bind the method to a class
        :return: A list of lists of s vectors
        :doc-author: Trelent
        """
        print('Getting S space from W')
        ss = []
        for w in tqdm(self.annotations['w_vectors']):
            w_torch = torch.from_numpy(w).to(self.device)
            W = w_torch.expand((16, -1)).unsqueeze(0)
            s = []
            for i, layer in enumerate(self.layers):
                s.append(getattr(self.model.synthesis, layer).affine(W[0, i].unsqueeze(0)).cpu().numpy())
            ss.append(s)

        self.annotations['s_vectors'] = ss
        annotations_file = join(self.repo_folder, 'data/textile_annotated_files/seeds0000-100000_S.pkl')
        print('Storing s for future use here:', annotations_file)
        with open(annotations_file, 'wb') as f:
            pickle.dump(self.annotations, f)
    def get_encoded_latent(self):
        """Return the latent matrix X for the configured space (Z, W, or S)."""
        if self.space.lower() == 'w':
            X = np.array(self.annotations['w_vectors']).reshape((len(self.annotations['w_vectors']), 512))
        elif self.space.lower() == 'z':
            X = np.array(self.annotations['z_vectors']).reshape((len(self.annotations['z_vectors']), 512))
        elif self.space.lower() == 's':
            concat_v = []
            for i in range(len(self.annotations['w_vectors'])):
                concat_v.append(np.concatenate(self.annotations['s_vectors'][i], axis=1))
            X = np.array(concat_v)
            X = X[:, 0, :]
        else:
            raise Exception("Sorry, option not available, select among Z, W, S")

        print('Shape embedding:', X.shape)
        return X
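
        # Shape sketch (derived from self.layers_shapes above, not printed in the
        # original): X is (N, 512) for Z and W; for S it is the per-layer style
        # vectors concatenated, i.e. (N, sum(self.layers_shapes)) = (N, 6179).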
    def get_train_val(self, extremes=False):
        X = self.get_encoded_latent()
        y = np.array(self.df[self.variable].values)
        if self.categorical:
            bins = [(x - 1) * 360 / (len(self.colors_list) - 1) if x != 1
                    else 1 for x in range(len(self.colors_list) + 1)]
            bins[0] = 0
            y_cat = pd.cut(y,
                           bins=bins,
                           labels=self.colors_list,
                           include_lowest=True
                           )
            print(y_cat.value_counts())
            x_train, x_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2)
        else:
            if extremes:
                # Calculate the number of elements to consider (20% of array size)
                num_elements = int(0.2 * len(y))
                # Get indices of the num_elements largest and smallest values
                top_indices = np.argpartition(y, -num_elements)[-num_elements:]
                bottom_indices = np.argpartition(y, num_elements)[:num_elements]
                extreme_indices = np.concatenate([top_indices, bottom_indices])
                y_ext = y[extreme_indices]
                X_ext = X[extreme_indices, :]
                x_train, x_val, y_train, y_val = train_test_split(X_ext, y_ext, test_size=0.2)
            else:
                x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
        return x_train, x_val, y_train, y_val
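
        # Bin-edge sketch for the 8-colour palette used in main() (values computed
        # from the formula above, not from the original file): hues in [0, 1] fall
        # into the first, greyish bin, and the remaining degrees are split evenly:
        # bins == [0, 1, 51.4, 102.9, 154.3, 205.7, 257.1, 308.6, 360.0]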
    def InterFaceGAN_separation_vector(self, method='LR', C=0.1, extremes=False):
        """
        Method from InterFaceGAN.
        Trains a linear model to separate the annotated samples along the target
        variable and returns the normal of the separating hyperplane. For categorical
        targets it fits an SVM or logistic regression classifier; for continuous
        targets it fits a linear regression.

        :param method: Specify the classifier to use ('SVM' or 'LR')
        :param C: Control the regularization strength
        :param extremes: If True (continuous targets only), train on the top/bottom 20% of samples
        :return: The normalized weights of the linear model
        :doc-author: Trelent
        """
        x_train, x_val, y_train, y_val = self.get_train_val(extremes=extremes)
        if self.categorical:
            if method == 'SVM':
                svc = SVC(gamma='auto', kernel='linear', random_state=0, C=C)
                svc.fit(x_train, y_train)
                print('Val performance SVM', np.round(svc.score(x_val, y_val), 2))
                return svc.coef_ / np.linalg.norm(svc.coef_)
            elif method == 'LR':
                clf = LogisticRegression(random_state=0, C=C)
                clf.fit(x_train, y_train)
                print('Val performance logistic regression', np.round(clf.score(x_val, y_val), 2))
                return clf.coef_ / np.linalg.norm(clf.coef_)
        else:
            clf = LinearRegression()
            clf.fit(x_train, y_train)
            print('Val performance linear regression', np.round(clf.score(x_val, y_val), 2))
            return clf.coef_ / np.linalg.norm(clf.coef_)
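
    # Usage sketch (assumed call pattern, mirroring main() below):
    #   exp = DisentanglementBase('.', model, annotations, df, space='w',
    #                             colors_list=colors_list, variable='H1')
    #   vectors = exp.InterFaceGAN_separation_vector(method='LR', C=0.1)
    #   # the calling code pairs row vectors[i] with colors_list[i]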
    def get_original_position_latent(self, positive_idxs, negative_idxs):
        """Map flat unit indices back to per-layer (S) or per-dimension (Z/W) separation vectors."""
        separation_vectors = []
        for i in range(len(self.colors_list)):
            if self.space.lower() == 's':
                current_idx = 0
                vectors = []
                for j, (leng, layer) in enumerate(zip(self.layers_shapes, self.layers)):
                    arr = np.zeros(leng)
                    for positive_idx in positive_idxs[i]:
                        if current_idx <= positive_idx < current_idx + leng:
                            arr[positive_idx - current_idx] = 1
                    # Negative units are pushed in the opposite direction, matching the Z/W branch.
                    for negative_idx in negative_idxs[i]:
                        if current_idx <= negative_idx < current_idx + leng:
                            arr[negative_idx - current_idx] = -1
                    arr = arr / (np.linalg.norm(arr) + 1e-6)
                    vectors.append(arr)
                    current_idx += leng
            elif self.space.lower() in ('z', 'w'):
                vectors = np.zeros(512)
                vectors[positive_idxs[i]] = 1
                vectors[negative_idxs[i]] = -1
                vectors = vectors / (np.linalg.norm(vectors) + 1e-6)
            else:
                raise Exception("""This space is not allowed in this function,
                                select among Z, W, S""")
            separation_vectors.append(vectors)
        return separation_vectors
    def StyleSpace_separation_vector(self, sign=True, num_factors=20, cutout=0.25):
        """Formula from StyleSpace Analysis: rank units by theta, the mean over the
        standard deviation of the class-conditional, standardized activations."""
        x_train, x_val, y_train, y_val = self.get_train_val()

        positive_idxs = []
        negative_idxs = []
        for color in self.colors_list:
            x_col = x_train[np.where(y_train == color)]
            mp = np.mean(x_train, axis=0)
            sp = np.std(x_train, axis=0)
            de = (x_col - mp) / sp
            meu = np.mean(de, axis=0)
            seu = np.std(de, axis=0)

            if sign:
                thetau = meu / seu
                positive_idx = np.argsort(thetau)[-num_factors//2:]
                negative_idx = np.argsort(thetau)[:num_factors//2]
            else:
                thetau = np.abs(meu) / seu
                positive_idx = np.argsort(thetau)[-num_factors:]
                negative_idx = []

            if cutout:
                beyond_cutout = np.where(np.abs(thetau) > cutout)
                positive_idx = np.intersect1d(positive_idx, beyond_cutout)
                negative_idx = np.intersect1d(negative_idx, beyond_cutout)

                if len(positive_idx) == 0 and len(negative_idx) == 0:
                    print('No values found above the current cutout', cutout, 'for color', color,
                          '.\nDisentangled vector will be all zeros.')

            positive_idxs.append(positive_idx)
            negative_idxs.append(negative_idx)

        separation_vectors = self.get_original_position_latent(positive_idxs, negative_idxs)
        return separation_vectors
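
    # Usage sketch (assumed, matching the StyleSpace call in main() below):
    #   vectors = exp.StyleSpace_separation_vector(sign=True, num_factors=10, cutout=None)
    #   # vectors[i] keeps only the ~10 most colour-specific units for colors_list[i],
    #   # positives weighted +1 and negatives -1 before normalization.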
    def GANSpace_separation_vectors(self, num_components):
        """PCA directions in W space, following GANSpace."""
        x_train, x_val, y_train, y_val = self.get_train_val()
        if self.space.lower() == 'w':
            pca = PCA(n_components=num_components)
            dims_pca = pca.fit_transform(x_train.T)
            dims_pca /= np.linalg.norm(dims_pca, axis=0)
            return dims_pca
        else:
            raise Exception("""This space is not allowed in this function,
                            only W""")
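
    # Shape sketch (derived from the PCA call above): with x_train of shape
    # (n_samples, 512), fit_transform on the transpose returns dims_pca of shape
    # (512, num_components); column i is the i-th normalized principal direction.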
    def generate_images(self, seed, separation_vector=None, lambd=0):
        """
        The generate_images function takes an annotated latent vector and the model,
        and returns an image generated from that latent vector, optionally shifted
        by lambd along a separation vector.

        :param seed: Index of the annotated latent vector to start from
        :param separation_vector: Direction along which to move the latent
        :param lambd: How far to move along the separation vector
        :return: A PIL image
        :doc-author: Trelent
        """
        G = self.model.to(self.device)  # type: ignore
        # Labels.
        label = torch.zeros([1, G.c_dim], device=self.device)
        if self.space.lower() == 'z':
            vec = self.annotations['z_vectors'][seed]
            Z = torch.from_numpy(vec.copy()).to(self.device)
            if separation_vector is not None:
                change = torch.from_numpy(separation_vector.copy()).unsqueeze(0).to(self.device)
                Z = torch.add(Z, change, alpha=lambd)
            img = G(Z, label, truncation_psi=1, noise_mode='const')
        elif self.space.lower() == 'w':
            vec = self.annotations['w_vectors'][seed]
            W = torch.from_numpy(np.repeat(vec, self.decoding_layers, axis=0)
                                 .reshape(1, self.decoding_layers, vec.shape[1]).copy()).to(self.device)
            if separation_vector is not None:
                change = torch.from_numpy(separation_vector.copy()).unsqueeze(0).to(self.device)
                W = torch.add(W, change, alpha=lambd)
            img = G.synthesis(W, noise_mode='const')
        else:
            raise Exception("""This space is not allowed in this function,
                            select either W or Z or use generate_flexible_images""")
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        return PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
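
    # Usage sketch (assumed): regenerate annotated sample 0 unchanged, then push it
    # one unit along a W-space direction:
    #   img_orig = exp.generate_images(0)
    #   img_moved = exp.generate_images(0, separation_vector=vectors[i], lambd=1.0)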
    def forward_from_style(self, x, styles, layer):
        layer_mod = getattr(self.model.synthesis, layer)
        dtype = torch.float16 if (layer_mod.use_fp16 and self.device == 'cuda') else torch.float32

        if layer_mod.is_torgb:
            weight_gain = 1 / np.sqrt(layer_mod.in_channels * (layer_mod.conv_kernel ** 2))
            styles = styles * weight_gain

        input_gain = layer_mod.magnitude_ema.rsqrt().to(dtype)

        # Execute modulated conv2d.
        x = modulated_conv2d(x=x.to(dtype), w=layer_mod.weight.to(dtype), s=styles.to(dtype),
                             padding=layer_mod.conv_kernel - 1,
                             demodulate=(not layer_mod.is_torgb),
                             input_gain=input_gain.to(dtype))

        # Execute bias, filtered leaky ReLU, and clamping.
        gain = 1 if layer_mod.is_torgb else np.sqrt(2)
        slope = 1 if layer_mod.is_torgb else 0.2
        x = filtered_lrelu.filtered_lrelu(x=x, fu=layer_mod.up_filter, fd=layer_mod.down_filter,
                                          b=layer_mod.bias.to(x.dtype),
                                          up=layer_mod.up_factor, down=layer_mod.down_factor,
                                          padding=layer_mod.padding,
                                          gain=gain, slope=slope, clamp=layer_mod.conv_clamp)
        return x
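
    # Note (a reading of the code above, not stated in the original): this mirrors
    # the per-layer forward pass of the StyleGAN3 synthesis network, except that the
    # style vector is supplied directly instead of being computed by the layer's
    # affine map, which is what lets generate_flexible_images edit S space per layer.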
    def generate_flexible_images(self, seed, separation_vector=None, lambd=0):
        if self.space.lower() != 's':
            raise Exception("""This space is not allowed in this function,
                            select S or use generate_images""")

        vec = self.annotations['w_vectors'][seed]
        w_torch = torch.from_numpy(vec).to(self.device)
        W = w_torch.expand((self.decoding_layers, -1)).unsqueeze(0)
        x = self.model.synthesis.input(W[0, 0].unsqueeze(0))
        for i, layer in enumerate(self.layers[1:]):
            style = getattr(self.model.synthesis, layer).affine(W[0, i].unsqueeze(0))
            if separation_vector is not None:
                change = torch.from_numpy(separation_vector[i + 1].copy()).unsqueeze(0).to(self.device)
                style = torch.add(style, change, alpha=lambd)
            x = self.forward_from_style(x, style, layer)

        if self.model.synthesis.output_scale != 1:
            x = x * self.model.synthesis.output_scale

        img = (x.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
        return img
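
    # Usage sketch (assumed, S space only): separation_vector here is the per-layer
    # list returned by StyleSpace_separation_vector / get_original_position_latent:
    #   exp_s = DisentanglementBase('.', model, annotations, df, space='s',
    #                               colors_list=colors_list)
    #   img = exp_s.generate_flexible_images(0, separation_vector=vectors[i], lambd=6)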
    def generate_changes(self, seed, separation_vector, min_epsilon=-3, max_epsilon=3, count=5,
                         savefig=True, feature=None, method=None, save_separately=False):
        """
        The generate_changes function generates a series of images along a separation
        vector. It sweeps lambda over `count` evenly spaced values in
        [min_epsilon, max_epsilon] and generates one image per value, optionally
        saving the sequence as a single figure and/or as separate files.

        :param seed: Index of the annotated latent vector to start from
        :param separation_vector: Direction along which the generated images vary
        :param min_epsilon: Set the minimum value of lambda
        :param max_epsilon: Set the maximum distance from the original image to generate
        :param count: Determine the number of images that are generated
        :return: A list of images and a list of lambdas
        :doc-author: Trelent
        """
        os.makedirs(join(self.repo_folder, 'figures', 'examples'), exist_ok=True)
        lambdas = np.linspace(min_epsilon, max_epsilon, count)
        images = []
        # Generate images.
        for _, lambd in enumerate(lambdas):
            if self.space.lower() == 's':
                images.append(self.generate_flexible_images(seed, separation_vector=separation_vector, lambd=lambd))
            elif self.space.lower() in ['z', 'w']:
                images.append(self.generate_images(seed, separation_vector=separation_vector, lambd=lambd))

        # Build the file stem here so save_separately also works when savefig is False.
        name = '_'.join([str(method), str(feature), self.space, str(seed), str(lambdas[-1])])
        if savefig:
            fig, axs = plt.subplots(1, len(images), figsize=(90, 20))
            title = ('Disentanglement method: ' + str(method) + ', on feature: ' + str(feature)
                     + ' on space: ' + self.space + ', image seed: ' + str(seed))
            fig.suptitle(title, fontsize=20)
            for i, (image, lambd) in enumerate(zip(images, lambdas)):
                axs[i].imshow(image)
                axs[i].set_title(np.round(lambd, 2))
            plt.tight_layout()
            plt.savefig(join(self.repo_folder, 'figures', 'examples', name + '.jpg'))
            plt.close()
        if save_separately:
            for i, (image, lambd) in enumerate(zip(images, lambdas)):
                plt.imshow(image)
                plt.tight_layout()
                plt.savefig(join(self.repo_folder, 'figures', 'examples', name + '_' + str(lambd) + '.jpg'))
                plt.close()
        return images, lambdas
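
    # Usage sketch (assumed): five steps from -3 to +3 along one direction, saved
    # under figures/examples/:
    #   images, lambdas = exp.generate_changes(0, vectors[i], min_epsilon=-3,
    #                                          max_epsilon=3, count=5,
    #                                          feature='Red', method='InterFaceGAN')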
    def get_verification_score(self, separation_vector, feature_id, samples=10, lambd=1,
                               savefig=False, feature=None, method=None):
        items = random.sample(range(100000), samples)

        if self.categorical:
            # Hue band for this colour, mirroring the bin edges in get_train_val.
            if feature_id == 0:
                hue_low = 0
                hue_high = 1
            elif feature_id == 1:
                hue_low = 1
                hue_high = feature_id * 360 / (len(self.colors_list) - 1)
            else:
                hue_low = (feature_id - 1) * 360 / (len(self.colors_list) - 1)
                hue_high = feature_id * 360 / (len(self.colors_list) - 1)

            matches = 0
            for seed in tqdm(items):
                images, lambdas = self.generate_changes(seed, separation_vector, min_epsilon=-lambd,
                                                        max_epsilon=lambd, count=3, savefig=savefig,
                                                        feature=feature, method=method)
                try:
                    colors_negative = extract_color(images[0], 5, 1, None)
                    h0, s0, v0 = rgb2hsv(*hex2rgb(colors_negative[0]))

                    colors_orig = extract_color(images[1], 5, 1, None)
                    h1, s1, v1 = rgb2hsv(*hex2rgb(colors_orig[0]))

                    colors_positive = extract_color(images[2], 5, 1, None)
                    h2, s2, v2 = rgb2hsv(*hex2rgb(colors_positive[0]))

                    if hue_low < h1 < hue_high:
                        # The original image already falls in the target hue band, so
                        # it cannot count as a conversion; shrink the sample size.
                        samples -= 1
                    elif (hue_low < h0 < hue_high) or (hue_low < h2 < hue_high):
                        matches += 1
                except Exception as e:
                    print(e)

            return np.round(matches / max(samples, 1), 2)
        else:
            increase = 0
            for seed in tqdm(items):
                images, lambdas = self.generate_changes(seed, separation_vector, min_epsilon=-lambd,
                                                        max_epsilon=lambd, count=3, savefig=savefig,
                                                        feature=feature, method=method)
                try:
                    colors_negative = extract_color(images[0], 5, 1, None)
                    r0, g0, b0 = hex2rgb(colors_negative[0])
                    h0, s0, v0 = rgb2hsv(*hex2rgb(colors_negative[0]))

                    colors_orig = extract_color(images[1], 5, 1, None)
                    r1, g1, b1 = hex2rgb(colors_orig[0])
                    h1, s1, v1 = rgb2hsv(*hex2rgb(colors_orig[0]))

                    colors_positive = extract_color(images[2], 5, 1, None)
                    r2, g2, b2 = hex2rgb(colors_positive[0])
                    h2, s2, v2 = rgb2hsv(*hex2rgb(colors_positive[0]))

                    if 's' in self.variable.lower():
                        increase += max(0, s2 - s1)
                    elif 'v' in self.variable.lower():
                        increase += max(0, v2 - v1)
                    elif 'r' in self.variable.lower():
                        increase += max(0, r2 - r1)
                    elif 'g' in self.variable.lower():
                        increase += max(0, g2 - g1)
                    elif 'b' in self.variable.lower():
                        increase += max(0, b2 - b1)
                    else:
                        raise Exception('Continuous variable not allowed, choose among RGB or SV')
                except Exception as e:
                    print(e)

            return np.round(increase / samples, 2)
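
    # Scoring sketch (a reading of the code above, hedged): for categorical targets
    # the score is the fraction of seeds where pushing +/-lambd moves the dominant
    # hue into the target band; for continuous targets it is the average gain in the
    # chosen channel (S, V, R, G, or B) between the original and the positively
    # shifted image.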
def continuous_experiment(name, var, repo_folder, model, annotations, df, space, colors_list, kwargs):
    scores = []
    print(f'Launching {name} experiment')
    disentanglement_exp = DisentanglementBase(repo_folder, model, annotations, df, space=space,
                                              colors_list=colors_list, compute_s=False,
                                              variable=var, categorical=False)
    for extr in kwargs['extremes']:
        separation_vector = disentanglement_exp.InterFaceGAN_separation_vector(extremes=extr)
        print(f'Generating images with variations for {name}')
        for s in range(30):
            seed = random.randint(0, 100000)
            for eps in kwargs['max_lambda']:
                disentanglement_exp.generate_changes(seed, separation_vector, min_epsilon=-eps,
                                                     max_epsilon=eps, savefig=True, feature=name,
                                                     method='InterFaceGAN_' + str(extr))
        print('Finally obtaining verification score')
        for verif in kwargs['lambda_verif']:
            score = disentanglement_exp.get_verification_score(separation_vector, 0,
                                                               samples=kwargs['samples'], lambd=verif,
                                                               savefig=False, feature=name,
                                                               method='InterFaceGAN_' + str(extr))
            print(f'Score for method InterFaceGAN on {name}:', score)
            scores.append([space, 'InterFaceGAN', name, score,
                           'extremes method: ' + str(extr) + ', verification lambda: ' + str(verif),
                           ', '.join(list(separation_vector.astype(str)))])

    score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
    print(score_df)
    score_df.to_csv(join(repo_folder, f'data/scores_{name}.csv'))
def main():
    repo_folder = '.'
    annotations_file = join(repo_folder, 'data/textile_annotated_files/seeds0000-100000_S.pkl')
    with open(annotations_file, 'rb') as f:
        annotations = pickle.load(f)

    df_file = join(repo_folder, 'data/textile_annotated_files/top_three_colours.csv')
    df = pd.read_csv(df_file).fillna('#000000')

    model_file = join(repo_folder, 'data/textile_model_files/network-snapshot-005000.pkl')
    with dnnlib.util.open_url(model_file) as f:
        model = legacy.load_network_pkl(f)['G_ema']  # type: ignore

    # Alternative 12-colour palette, kept for reference:
    # colors_list = ['Red', 'Orange', 'Yellow', 'Yellow Green', 'Chartreuse Green',
    #                'Kelly Green', 'Green Blue Seafoam', 'Cyan Blue',
    #                'Warm Blue', 'Indigo', 'Purple Magenta', 'Magenta Pink']
    colors_list = ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue',
                   'Blue', 'Purple', 'Pink']

    scores = []
    kwargs = {'CL method': ['LR', 'SVM'], 'C': [0.1, 1], 'sign': [True, False],
              'num_factors': [1, 5, 10, 20], 'cutout': [None], 'max_lambda': [18, 6],
              'samples': 30, 'lambda_verif': [14, 7], 'extremes': [True, False]}
    continuous = False
    specific_examples = [53139, 99376, 16, 99585, 40851, 70, 17703, 44, 52628,
                         99884, 52921, 46180, 19995, 40920, 554]

    if specific_examples is not None:
        disentanglement_exp = DisentanglementBase(repo_folder, model, annotations, df, space='w',
                                                  colors_list=colors_list, compute_s=False)
        separation_vectors = disentanglement_exp.StyleSpace_separation_vector(sign=True, num_factors=10, cutout=None)
        # separation_vectors = disentanglement_exp.InterFaceGAN_separation_vector(method='LR', C=0.1)
        for specific_example in specific_examples:
            seed = specific_example
            for i, color in enumerate(colors_list):
                disentanglement_exp.generate_changes(seed, separation_vectors[i], min_epsilon=-9, max_epsilon=9,
                                                     savefig=True, save_separately=True, feature=color,
                                                     method='StyleSpace' + '_' + str(True) + '_' + str(10) + '_' + str(None))
        return
    for space in ['w', ]:  # 'z', 's'
        print('Launching experiment with space:', space)
        if continuous:
            continuous_experiment('Saturation', 'S1', repo_folder, model, annotations, df, space, colors_list, kwargs)
            continuous_experiment('Value', 'V1', repo_folder, model, annotations, df, space, colors_list, kwargs)
            continuous_experiment('Red', 'R1', repo_folder, model, annotations, df, space, colors_list, kwargs)
            continuous_experiment('Green', 'G1', repo_folder, model, annotations, df, space, colors_list, kwargs)
            continuous_experiment('Blue', 'B1', repo_folder, model, annotations, df, space, colors_list, kwargs)
            break

        print('Launching Hue experiment')
        variable = 'H1'
        disentanglement_exp = DisentanglementBase(repo_folder, model, annotations, df, space=space,
                                                  colors_list=colors_list, compute_s=False, variable=variable)
        for method in ['StyleSpace', 'InterFaceGAN', ]:  # 'GANSpace'
            if space != 's' and method == 'InterFaceGAN':
                print('Now obtaining separation vector using InterFaceGAN')
                for met in kwargs['CL method']:
                    for c in kwargs['C']:
                        separation_vectors = disentanglement_exp.InterFaceGAN_separation_vector(method=met, C=c)
                        for i, color in enumerate(colors_list):
                            print(f'Generating images with variations for color {color}')
                            for s in range(30):
                                seed = random.randint(0, 100000)
                                for eps in kwargs['max_lambda']:
                                    disentanglement_exp.generate_changes(
                                        seed, separation_vectors[i], min_epsilon=-eps, max_epsilon=eps,
                                        savefig=True, feature=color,
                                        method=str(method) + '_' + str(met) + '_' + str(c) + '_'
                                               + str(len(colors_list)) + '_' + str(variable))
                            print('Finally obtaining verification score')
                            for verif in kwargs['lambda_verif']:
                                score = disentanglement_exp.get_verification_score(
                                    separation_vectors[i], i, samples=kwargs['samples'], lambd=verif,
                                    savefig=False, feature=color, method=method)
                                print('Score for method', method, 'on space', space, 'for color', color, ':', score)
                                scores.append([space, method, color, score,
                                               'classification method: ' + met + ', regularization: ' + str(c)
                                               + ', verification lambda: ' + str(verif),
                                               ', '.join(list(separation_vectors[i].astype(str)))])

                        score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
                        print(score_df)
                        score_df.to_csv(join(repo_folder, f'data/scores_InterFaceGAN_{variable}_{len(colors_list)}.csv'))
            elif method == 'StyleSpace':
                print('Now obtaining separation vector using StyleSpace')
                for sign in kwargs['sign']:
                    for num_factors in kwargs['num_factors']:
                        for cutout in kwargs['cutout']:
                            separation_vectors = disentanglement_exp.StyleSpace_separation_vector(
                                sign=sign, num_factors=num_factors, cutout=cutout)
                            for i, color in enumerate(colors_list):
                                print(f'Generating images with variations for color {color}')
                                for s in range(30):
                                    seed = random.randint(0, 100000)
                                    for eps in kwargs['max_lambda']:
                                        disentanglement_exp.generate_changes(
                                            seed, separation_vectors[i], min_epsilon=-eps, max_epsilon=eps,
                                            savefig=True, feature=color,
                                            method=method + '_' + str(num_factors) + '_' + str(cutout) + '_'
                                                   + str(sign) + '_' + str(len(colors_list)) + '_' + str(variable))
                                print('Finally obtaining verification score')
                                for verif in kwargs['lambda_verif']:
                                    score = disentanglement_exp.get_verification_score(
                                        separation_vectors[i], i, samples=kwargs['samples'], lambd=verif,
                                        savefig=False, feature=color, method=method)
                                    print('Score for method', method, 'on space', space, 'for color', color, ':', score)
                                    scores.append([space, method, color, score,
                                                   'using sign: ' + str(sign) + ', number of factors: ' + str(num_factors)
                                                   + ', using cutout: ' + str(cutout) + ', verification lambda: ' + str(verif),
                                                   ', '.join(list(separation_vectors[i].astype(str)))])

                            score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
                            print(score_df)
                            score_df.to_csv(join(repo_folder, f'data/scores_StyleSpace_{variable}_{len(colors_list)}.csv'))
            if space == 'w' and method == 'GANSpace':
                print('Now obtaining separation vector using GANSpace')
                separation_vectors = disentanglement_exp.GANSpace_separation_vectors(100)
                print(separation_vectors.shape)
                for s in range(30):
                    print('Generating images with variations')
                    seed = random.randint(0, 100000)
                    for i in range(100):
                        for eps in kwargs['max_lambda']:
                            disentanglement_exp.generate_changes(seed, separation_vectors.T[i],
                                                                 min_epsilon=-eps, max_epsilon=eps, savefig=True,
                                                                 feature='dimension_' + str(i), method=method)
                            score = None
                            scores.append([space, method, 'PCA', score, '100',
                                           ', '.join(list(separation_vectors.T[i].astype(str)))])
            else:
                print('Skipping', method, 'on space', space)
                continue

    score_df = pd.DataFrame(scores, columns=['space', 'method', 'color', 'score', 'kwargs', 'vector'])
    print(score_df)
    score_df.to_csv(join(repo_folder, 'data/scores_{}.csv'.format(
        pd.Timestamp.now().strftime("%Y-%m-%d_%H%M%S"))))
if __name__ == "__main__":
    main()