#!/usr/bin/env python

import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import random
from os.path import join
import os
import pickle
import torch
import matplotlib.pyplot as plt
import PIL
from PIL import Image, ImageColor
import sys

sys.path.append('backend')
from color_annotations import extract_color
from networks_stylegan3 import *
sys.path.append('.')
import dnnlib
import legacy

def hex2rgb(hex_value):
    h = hex_value.strip("#")
    rgb = tuple(int(h[i:i+2], 16) for i in (0, 2, 4))
    return rgb
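
# Illustrative examples (not part of the original script); hex2rgb accepts values
# with or without the leading '#':
#   hex2rgb('#ff8000') -> (255, 128, 0)
#   hex2rgb('000000')  -> (0, 0, 0)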

def rgb2hsv(r, g, b):
    # Normalize R, G, B values to the [0, 1] range
    r, g, b = r / 255.0, g / 255.0, b / 255.0

    # h, s, v = hue, saturation, value
    max_rgb = max(r, g, b)
    min_rgb = min(r, g, b)
    difference = max_rgb - min_rgb

    # if max_rgb and min_rgb are equal then h = 0
    if max_rgb == min_rgb:
        h = 0
    # if max_rgb == r then h is computed as follows
    elif max_rgb == r:
        h = (60 * ((g - b) / difference) + 360) % 360
    # if max_rgb == g then compute h as follows
    elif max_rgb == g:
        h = (60 * ((b - r) / difference) + 120) % 360
    # if max_rgb == b then compute h as follows
    elif max_rgb == b:
        h = (60 * ((r - g) / difference) + 240) % 360

    # if max_rgb is zero then s = 0
    if max_rgb == 0:
        s = 0
    else:
        s = (difference / max_rgb) * 100

    # compute v
    v = max_rgb * 100
    # return rounded values of H, S and V
    return tuple(map(round, (h, s, v)))
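
# Illustrative examples (not part of the original script); H is in degrees [0, 360),
# S and V are percentages, all rounded to integers:
#   rgb2hsv(255, 0, 0) -> (0, 100, 100)    # pure red
#   rgb2hsv(0, 0, 255) -> (240, 100, 100)  # pure blue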

class DisentanglementBase:
    def __init__(self, repo_folder, model, annotations, df, space, colors_list,
                 compute_s=False, variable='H1', categorical=True):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print('Using device', self.device)
        self.repo_folder = repo_folder
        self.model = model.to(self.device)
        self.annotations = annotations
        self.df = df
        self.space = space
        self.categorical = categorical
        self.variable = variable

        self.layers = ['input', 'L0_36_512', 'L1_36_512', 'L2_36_512', 'L3_52_512',
                       'L4_52_512', 'L5_84_512', 'L6_84_512', 'L7_148_512', 'L8_148_512',
                       'L9_148_362', 'L10_276_256', 'L11_276_181', 'L12_276_128',
                       'L13_256_128', 'L14_256_3']
        self.layers_shapes = [4, 512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 362, 256, 181, 128, 128]
        self.decoding_layers = 16
        self.colors_list = colors_list

        self.to_hsv()
        if compute_s:
            self.get_s_space()

    def to_hsv(self):
        """
        The to_hsv function takes the top 3 colors of each image and converts them to HSV values.
        It then adds these values as new columns in the dataframe.

        :param self: Allow the function to access the dataframe
        :return: The dataframe with the new columns added
        :doc-author: Trelent
        """
        print('Adding HSV encoding')
        self.df['H1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
        self.df['H2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
        self.df['H3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[0])
        self.df['S1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
        self.df['S2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
        self.df['S3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[1])
        self.df['V1'] = self.df['top1col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
        self.df['V2'] = self.df['top2col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])
        self.df['V3'] = self.df['top3col'].map(lambda x: rgb2hsv(*hex2rgb(x))[2])

        print('Adding RGB encoding')
        self.df['R1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
        self.df['R2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
        self.df['R3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[0])
        self.df['G1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
        self.df['G2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
        self.df['G3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[1])
        self.df['B1'] = self.df['top1col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
        self.df['B2'] = self.df['top2col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
        self.df['B3'] = self.df['top3col'].map(lambda x: ImageColor.getcolor(x, 'RGB')[2])
        return self.df
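
    # Illustrative note (not part of the original script): to_hsv expects hex colour
    # strings in 'top1col', 'top2col', 'top3col' and adds nine HSV columns
    # (H1..H3, S1..S3, V1..V3) plus nine RGB columns (R1..R3, G1..G3, B1..B3), e.g.:
    #   df.loc[0, ['H1', 'S1', 'V1']]  # hue (deg), saturation (%), value (%) of the top colour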

    def get_encoded_latent(self):
        # Build the feature matrix X from the annotated latent vectors of the chosen space
        if self.space.lower() == 'w':
            X = np.array(self.annotations['w_vectors']).reshape((len(self.annotations['w_vectors']), 512))
        elif self.space.lower() == 'z':
            X = np.array(self.annotations['z_vectors']).reshape((len(self.annotations['z_vectors']), 512))
        elif self.space.lower() == 's':
            concat_v = []
            for i in range(len(self.annotations['w_vectors'])):
                concat_v.append(np.concatenate(self.annotations['s_vectors'][i], axis=1))
            X = np.array(concat_v)
            X = X[:, 0, :]
        else:
            raise Exception("Sorry, option not available, select among Z, W, S")

        print('Shape embedding:', X.shape)
        return X
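
    # Illustrative note (not part of the original script): for space='w' or 'z' the
    # returned matrix has shape (n_samples, 512); for space='s' the per-layer style
    # vectors are concatenated, so the second dimension is the sum of the per-layer
    # style dimensions.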

    def get_train_val(self, extremes=False):
        X = self.get_encoded_latent()
        y = np.array(self.df[self.variable].values)
        if self.categorical:
            y_cat = pd.cut(y,
                           bins=[x * 360 / len(self.colors_list) if x < len(self.colors_list)
                                 else 360 for x in range(len(self.colors_list) + 1)],
                           labels=self.colors_list
                           ).fillna(self.colors_list[0])
            x_train, x_val, y_train, y_val = train_test_split(X, y_cat, test_size=0.2)
        else:
            if extremes:
                # Keep only the extremes: the top and bottom 20% of the target values
                num_elements = int(0.2 * len(y))
                # Indices of the num_elements largest and smallest values
                top_indices = np.argpartition(y, -num_elements)[-num_elements:]
                bottom_indices = np.argpartition(y, num_elements)[:num_elements]
                ext_indices = np.concatenate([top_indices, bottom_indices])
                y_ext = y[ext_indices]
                X_ext = X[ext_indices, :]
                x_train, x_val, y_train, y_val = train_test_split(X_ext, y_ext, test_size=0.2)
            else:
                x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
        return x_train, x_val, y_train, y_val
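
    # Illustrative note (not part of the original script): with the 7-colour list used
    # in main(), the hue bins passed to pd.cut are roughly
    #   [0, 51.4, 102.9, 154.3, 205.7, 257.1, 308.6, 360]
    # so each colour label covers an equal ~51.4-degree slice of the hue circle.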

    def generate_orig_image(self, vec, seed=False):
        """
        The generate_orig_image function takes in a latent vector (or a seed index)
        and returns an image generated by the model from that latent vector.

        :param vec: Latent vector used to generate the image, or a seed index if seed=True
        :param seed: If True, treat vec as an index into the annotated z vectors
        :return: A PIL image
        :doc-author: Trelent
        """
        G = self.model.to(self.device)  # type: ignore
        # Labels.
        label = torch.zeros([1, G.c_dim], device=self.device)
        if seed:
            seed = vec
            vec = self.annotations['z_vectors'][seed]
        Z = torch.from_numpy(vec.copy()).to(self.device)
        img = G(Z, label, truncation_psi=1, noise_mode='const')
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        img = PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB')
        return img
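
    # Illustrative usage (not part of the original script; variable names are hypothetical):
    #   exp = DisentanglementBase('.', model, annotations, df, space='w', colors_list=colors)
    #   img = exp.generate_orig_image(42, seed=True)   # regenerate the annotated seed 42
    #   img.save('seed42.png')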


def main():
    repo_folder = '.'
    annotations_file = join(repo_folder, 'data/textile_annotated_files/seeds0000-100000_S.pkl')
    with open(annotations_file, 'rb') as f:
        annotations = pickle.load(f)

    df_file = join(repo_folder, 'data/textile_annotated_files/top_three_colours.csv')
    df = pd.read_csv(df_file).fillna('#000000')

    model_file = join(repo_folder, 'data/textile_model_files/network-snapshot-005000.pkl')
    with dnnlib.util.open_url(model_file) as f:
        model = legacy.load_network_pkl(f)['G_ema']  # type: ignore

    colors_list = ['Red', 'Orange', 'Yellow', 'Yellow Green', 'Chartreuse Green',
                   'Kelly Green', 'Green Blue Seafoam', 'Cyan Blue',
                   'Warm Blue', 'Indigo', 'Purple Magenta', 'Magenta Pink']
    # The coarser 7-colour palette below overrides the 12-colour list above
    colors_list = ['Red Orange', 'Yellow', 'Green', 'Light Blue',
                   'Blue', 'Purple', 'Pink']

    disentanglement_exp = DisentanglementBase(repo_folder, model, annotations, df, space='w', colors_list=colors_list)
    # x_train, x_val, y_train, y_val = disentanglement_exp.get_train_val()
    # print(colors_list)
    # print(np.unique(y_train, return_counts=True))
    # for i, color in enumerate(colors_list):
    #     idxs = np.where(y_train == color)
    #     x_color = x_train[idxs][:30, :]
    #     print(x_color.shape)
    #     print('Generating images of color ' + color)
    #     for j, vec in enumerate(x_color):
    #         vec = np.expand_dims(vec, axis=0)
    #         img = disentanglement_exp.generate_orig_image(vec)
    #         img.save(f'{repo_folder}/colors_test/color_{color}_{j}.png')

    df = disentanglement_exp.to_hsv()
    df['color'] = pd.cut(df['H1'],
                         bins=[x * 360 / len(colors_list) if x < len(colors_list)
                               else 360 for x in range(len(colors_list) + 1)],
                         labels=colors_list
                         ).fillna(colors_list[0])
    print(df['color'].value_counts())

    df['seed'] = df['fname'].str.split('/').apply(lambda x: x[-1]).str.replace('seed', '').str.replace('.png', '').astype(int)
    print(df[df['seed'] == 3][['H1', 'S1', 'V1', 'R1', 'B1', 'G1']])

    for i, color in enumerate(colors_list):
        idxs = df['color'] == color
        x_color = df['seed'][idxs][:30]
        print('Generating images of color ' + color)
        for j, vec in enumerate(x_color):
            img = disentanglement_exp.generate_orig_image(int(vec), seed=True)
            img.save(f'{repo_folder}/colors_test/color_{color}_{j}corrected.png')


if __name__ == "__main__":
    main()