Create app.py
app.py
ADDED
@@ -0,0 +1,131 @@
import streamlit as st
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.models import Model
from PIL import Image
import IPython.display as display


# Helper functions for the Deep Dream pipeline
def load_image(image_path, max_dim):
    # Load the image, convert to RGB, shrink it to fit max_dim, and add a batch dimension
    img = Image.open(image_path)
    img = img.convert("RGB")
    img.thumbnail([max_dim, max_dim])
    img = np.array(img, dtype=np.uint8)
    img = np.expand_dims(img, axis=0)
    return img


def deprocess_inception_image(img):
    # Undo InceptionV3 preprocessing: map values from [-1, 1] back to [0, 255]
    img = 255 * (img + 1) / 2
    return np.array(img, np.uint8)


def array_to_img(array, deprocessing=False):
    if deprocessing:
        array = deprocess_inception_image(array)

    # Drop the batch dimension if one is present
    if np.ndim(array) > 3:
        assert array.shape[0] == 1
        array = array[0]

    return Image.fromarray(array)


def show_image(img):
    image = array_to_img(img)
    display.display(image)


def deep_dream_model(model, layer_names):
    # Freeze the base model and expose the selected layer activations as outputs
    model.trainable = False
    outputs = [model.get_layer(name).output for name in layer_names]
    new_model = Model(inputs=model.input, outputs=outputs)
    return new_model


def get_loss(activations):
    # Deep Dream loss: the sum of the mean activation of each selected layer
    loss = []
    for activation in activations:
        loss.append(tf.math.reduce_mean(activation))
    return tf.reduce_sum(loss)


def model_output(model, inputs):
    return model(inputs)


def get_loss_and_gradient(model, inputs, total_variation_weight=0):
    with tf.GradientTape() as tape:
        tape.watch(inputs)
        activations = model_output(model, inputs)
        loss = get_loss(activations)
        loss = loss + total_variation_weight * tf.image.total_variation(inputs)
    grads = tape.gradient(loss, inputs)
    # Normalise the gradients so the ascent step size does not depend on their magnitude
    grads /= tf.math.reduce_std(grads) + 1e-8
    return loss, grads


def run_gradient_ascent(model, inputs, epochs=1, steps_per_epoch=1, weight=0.05, total_variation_weight=0):
    img = tf.convert_to_tensor(inputs)
    for i in range(epochs):
        for _ in range(steps_per_epoch):
            _, grads = get_loss_and_gradient(model, img, total_variation_weight)
            # Gradient ascent step: nudge the image towards higher activations, keeping values in [-1, 1]
            img = img + grads * weight
            img = tf.clip_by_value(img, -1.0, 1.0)

    return img.numpy()


centered_text = """
<div style="text-align: center;">
    Built with ❤️ by Unnati
</div>
"""

# Streamlit App
st.title("Deep Dream Streamlit App")
st.write("Upload an image to generate mesmerising Deep Dream images. Adjust the parameters in the sidebar to get "
         "different effects.")
st.write("Image generation may take a while depending on the parameters chosen, so please be patient.")

# File uploader
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

# Checkboxes for selecting layers
st.sidebar.title("Adjust parameters for different effects!")
layer_checkboxes = []
for i in range(1, 11):
    default_value = (i == 5)  # Set default to True for layer 5, False for others
    layer_checkbox = st.sidebar.checkbox(f"Layer {i}", value=default_value)
    layer_checkboxes.append(layer_checkbox)

# Sliders for parameter adjustments
epochs = st.sidebar.slider("Epochs", 1, 5, 2, help="Number of gradient ascent epochs")
steps_per_epoch = st.sidebar.slider("Steps per Epoch", 1, 100, 50, help="Number of steps per epoch")
weight = st.sidebar.slider("Weight", 0.01, 0.1, 0.02, step=0.01, help="Weight for gradient ascent")

if uploaded_file is not None:
    # Load and preprocess the uploaded image
    input_image = load_image(uploaded_file, max_dim=150)
    preprocessed_image = inception_v3.preprocess_input(input_image)

    # Create Inception model and modify for deep dream
    inception = inception_v3.InceptionV3(weights="imagenet", include_top=False)

    # Select layers based on user input
    selected_layers = [f'mixed{i}' for i, checkbox in enumerate(layer_checkboxes, start=1) if checkbox]
    dream_model = deep_dream_model(inception, selected_layers)

    # Run gradient ascent
    image_array = run_gradient_ascent(dream_model, preprocessed_image, epochs=epochs, steps_per_epoch=steps_per_epoch, weight=weight)

    # Convert numpy arrays to PIL images
    dream_pil_image = array_to_img(deprocess_inception_image(image_array))

    # Display the Deep Dream image
    st.image(dream_pil_image, caption='Deep Dream Image', width=300)

st.markdown("<hr>", unsafe_allow_html=True)
st.markdown(centered_text, unsafe_allow_html=True)