# what-convnets-learn / app_utils.py
import streamlit as st
# Streamlit session-state initialization
def initialize_states():
    """Create the session-state keys used across reruns, if not already set."""
    # Streamlit state variables
    if "model_name" not in st.session_state:
        st.session_state.model_name = None
    if "layer_name" not in st.session_state:
        st.session_state.layer_name = None
    if "layer_list" not in st.session_state:
        st.session_state.layer_list = None
    if "model" not in st.session_state:
        st.session_state.model = None
    if "feat_extract" not in st.session_state:
        st.session_state.feat_extract = None
# Strings
replicate = ":bulb: Choose **ResNet50V2** model and **conv3_block4_out** to get the results as in the example."
credits = ":memo: [Keras example](https://keras.io/examples/vision/visualizing_what_convnets_learn/) by [@fchollet](https://twitter.com/fchollet)."
vit_info = ":star: For Vision Transformers, check the excellent [probing-vits](https://huggingface.co/probing-vits) space."
title = "Visualizing What Convnets Learn"
info_text = """
Models in this demo are pre-trained on the ImageNet dataset.
The visualization works by generating input images that maximize the activation of a chosen filter in a target layer.
Each resulting image shows the pattern that the filter responds to.
"""
self_credit = "Space by Vrinda Prabhu"
# Constants and globals
IMG_WIDTH = 180
IMG_HEIGHT = 180
VIS_OPTION = {"only the first filter": 0, "the first 64 filters": 64}
ITERATIONS = 30
LEARNING_RATE = 10.0
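
# --- Illustrative sketch (assumption, not the app's verified implementation) ---
# The constants above feed the gradient-ascent loop described in the Keras
# example linked in `credits`: starting from a near-gray random image, the loop
# repeatedly steps the image in the direction that increases the mean activation
# of the chosen filter. Here `feat_extract` is assumed to be a tf.keras.Model
# mapping an input image to the activations of the selected layer (what the app
# stores in st.session_state.feat_extract), and `filter_index` picks the filter
# to maximize; the function name is hypothetical.
import tensorflow as tf

def visualize_filter_sketch(feat_extract, filter_index):
    # Start from a small random image centered around gray.
    img = tf.random.uniform((1, IMG_WIDTH, IMG_HEIGHT, 3))
    img = (img - 0.5) * 0.25
    for _ in range(ITERATIONS):
        with tf.GradientTape() as tape:
            tape.watch(img)
            activation = feat_extract(img)
            # Mean activation of the target filter; borders are trimmed to
            # reduce edge artifacts, as in the Keras example.
            loss = tf.reduce_mean(activation[:, 2:-2, 2:-2, filter_index])
        grads = tape.gradient(loss, img)
        # Normalizing the gradient keeps the step size governed by LEARNING_RATE.
        grads = tf.math.l2_normalize(grads)
        img += LEARNING_RATE * grads
    return img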