# Streamlit demo app: disentanglement studies on the iMET Textiles dataset.
# (Removed non-Python residue pasted from a file-viewer page: status lines,
# commit hashes and a gutter line-number run that would break parsing.)
import streamlit as st
import pickle
import pandas as pd
import numpy as np
import random
import torch
from matplotlib.backends.backend_agg import RendererAgg
from backend.disentangle_concepts import *
import torch_utils
import dnnlib
import legacy
# Matplotlib's Agg renderer is not thread-safe and Streamlit reruns the script
# in worker threads, so any matplotlib drawing should hold this lock.
_lock = RendererAgg.lock

st.set_page_config(layout='wide')

# Color palette constants.  NOTE(review): not referenced in the visible code —
# presumably used for theming elsewhere; confirm before removing.
BACKGROUND_COLOR = '#bcd0e7'
SECONDARY_COLOR = '#bce7db'

st.title('Disentanglement studies on the Textile Dataset')
st.markdown(
    """
This is a demo of the Disentanglement studies on the [iMET Textiles Dataset](https://www.metmuseum.org/art/collection/search/85531).
""",
    unsafe_allow_html=False,)
# ---------------------------- DATA & MODEL LOADING ----------------------------
# Pre-computed latent-vector annotations (z/w) for generated seeds 0-100000.
annotations_file = './data/textile_annotated_files/seeds0000-100000_S.pkl'
with open(annotations_file, 'rb') as f:
    annotations = pickle.load(f)

# Stored separation vectors: one row per concept (colors, saturation, value),
# with the vector serialized as a ", "-joined string plus a quality score.
concept_vectors = pd.read_csv('./data/stored_vectors/scores_colors_hsv.csv')
concept_vectors['vector'] = [np.array([float(xx) for xx in x]) for x in concept_vectors['vector'].str.split(', ')]
concept_vectors['score'] = concept_vectors['score'].astype(float)
# Best-scoring vector per concept comes first.  drop=True: plain reset_index()
# used to leave a stray 'index' column in the frame.
concept_vectors = concept_vectors.sort_values('score', ascending=False).reset_index(drop=True)

# Load the pre-trained StyleGAN generator; CPU inference only in this demo.
# (Removed a leftover debug print of the vectors frame.)
with dnnlib.util.open_url('./data/textile_model_files/network-snapshot-005000.pkl') as f:
    model = legacy.load_network_pkl(f)['G_ema'].to('cpu')  # type: ignore
# Color concepts the user can pick from (must match the CSV's 'color' column).
COLORS_LIST = ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue', 'Blue', 'Purple', 'Pink']

# Session-state defaults so selections survive Streamlit reruns.
if 'image_id' not in st.session_state:
    st.session_state.image_id = 0
# Bug fix: the guard used to test 'color_ids' while the code stores and reads
# 'concept_ids', so the default clobbered the user's choice on every rerun.
if 'concept_ids' not in st.session_state:
    st.session_state.concept_ids = COLORS_LIST[-1]
if 'space_id' not in st.session_state:
    st.session_state.space_id = 'W'
# def on_change_random_input():
# st.session_state.image_id = st.session_state.image_id

# ----------------------------- INPUT ----------------------------------
st.header('Input')
input_col_1, input_col_2, input_col_3 = st.columns(3)

# --------------------------- INPUT column 1 ---------------------------
# Column 1: pick an image by explicit ID, or sample a random one.
with input_col_1:
    with st.form('image_form'):
        # image_id = st.number_input('Image ID: ', format='%d', step=1)
        st.write('**Choose or generate a random image to test the disentanglement**')
        # Placeholder so the number input can be re-rendered below with the
        # freshly drawn random ID.
        chosen_image_id_input = st.empty()
        image_id = chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)
        choose_image_button = st.form_submit_button('Choose the defined image')
        random_id = st.form_submit_button('Generate a random image')

        if random_id:
            # 0-100000 matches the seed range of the annotations file.
            image_id = random.randint(0, 100000)
            st.session_state.image_id = image_id
            # Redraw the input inside the placeholder so it shows the new ID.
            chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)

        if choose_image_button:
            image_id = int(image_id)
            st.session_state.image_id = int(image_id)
# --------------------------- INPUT column 2 ---------------------------
# Column 2: choose which color direction to manipulate and how strongly.
with input_col_2:
    # Renamed form key (was 'text_form', colliding with column 3's form).
    with st.form('color_form'):
        st.write('**Choose color to vary**')
        type_col = st.selectbox('Color:', tuple(COLORS_LIST))
        colors_button = st.form_submit_button('Choose the defined color')
        st.write('**Set range of change**')
        chosen_color_lambda_input = st.empty()
        color_lambda = chosen_color_lambda_input.number_input('Lambda:', min_value=0, step=1, value=7, key='color_lambda')
        color_lambda_button = st.form_submit_button('Choose the defined lambda')

        # Bug fix: this guard referenced the undefined names
        # `choose_text_button` and `space_id` (leftovers of the removed
        # fourth column), raising a NameError at runtime.  The latent space
        # stays at its session-state default ('W').
        if colors_button:
            st.session_state.concept_ids = type_col
# --------------------------- INPUT column 3 ---------------------------
# Column 3: saturation / value (HSV) manipulation strengths.
with input_col_3:
    # Bug fixes: the form key 'text_form' duplicated column 2's form, the two
    # 'Lambda:' number inputs below were parameter-identical, and the two
    # submit buttons shared one label — each of which makes Streamlit raise a
    # DuplicateWidgetID error.  Unique form/widget keys and distinct button
    # labels resolve the collisions.
    with st.form('saturation_value_form'):
        st.write('**Saturation variation**')
        chosen_saturation_lambda_input = st.empty()
        saturation_lambda = chosen_saturation_lambda_input.number_input('Lambda:', min_value=0, step=1, key='saturation_lambda')
        saturation_lambda_button = st.form_submit_button('Choose the defined saturation lambda')
        st.write('**Value variation**')
        chosen_value_lambda_input = st.empty()
        value_lambda = chosen_value_lambda_input.number_input('Lambda:', min_value=0, step=1, key='value_lambda')
        value_lambda_button = st.form_submit_button('Choose the defined value lambda')
# with input_col_4:
# with st.form('Network specifics:'):
# st.write('**Choose a latent space to use**')
# space_id = st.selectbox('Space:', tuple(['W']))
# choose_text_button = st.form_submit_button('Choose the defined concept and space to disentangle')
# st.write('**Select hierarchical levels to manipulate**')
# layers = st.multiselect('Layers:', tuple(range(14)))
# if len(layers) == 0:
# layers = None
# print(layers)
# layers_button = st.form_submit_button('Choose the defined layers')
# ---------------------------- SET UP OUTPUT ------------------------------
epsilon_container = st.empty()
st.header('Image Manipulation')
st.subheader('Using selected directions')
header_col_1, header_col_2 = st.columns([1,1])
output_col_1, output_col_2 = st.columns([1,1])
# # prediction error container
# error_container = st.empty()
# smoothgrad_header_container = st.empty()
# # smoothgrad container
# smooth_head_1, smooth_head_2, = st.columns([1,1,])
# smoothgrad_col_1, smoothgrad_col_2 = st.columns([1,1])
# ---------------------------- DISPLAY COL 1 ROW 1 ------------------------------
with header_col_1:
st.write(f'Original image')
with header_col_2:
color_separation_vector, performance_color = concept_vectors[concept_vectors['color'] == st.session_state.concept_ids].loc[0, ['vector', 'score']]
saturation_separation_vector, performance_saturation = concept_vectors[concept_vectors['color'] == 'Saturation'].loc[0, ['vector', 'score']]
value_separation_vector, performance_value = concept_vectors[concept_vectors['color'] == 'Value'].loc[0, ['vector', 'score']]
st.write(f'Change in {st.session_state.concept_ids} of {np.round(color_lambda, 2)}, in saturation of {np.round(saturation_lambda, 2)}, in value of {np.round(value_lambda, 2)}. - Performance color vector: {performance_color}, saturation vector: {performance_saturation}, value vector: {performance_value}')
# ---------------------------- DISPLAY COL 2 ROW 1 ------------------------------
if st.session_state.space_id == 'Z':
original_image_vec = annotations['z_vectors'][st.session_state.image_id]
else:
original_image_vec = annotations['w_vectors'][st.session_state.image_id]
img = generate_original_image(original_image_vec, model, latent_space=st.session_state.space_id)
with output_col_1:
st.image(img)
with output_col_2:
image_updated = generate_composite_images(model, original_image_vec, [separation_vector_color, saturation_separation_vector, value_separation_vector], lambdas=[color_lambda, saturation_lambda, value_lambda])
st.image(image_updated)