",
unsafe_allow_html=True,
)
page_names = ["CLIP Guided Diffusion", "VQGAN+CLIP"]
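# Streamlit reruns this whole script on every interaction, so the canvas
# dimensions, RNG seed, and text prompt are persisted in st.session_state
# to survive reruns.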
if "width" not in st.session_state:
st.session_state["width"] = 512
if "height" not in st.session_state:
st.session_state["height"] = 512
if "seed" not in st.session_state:
init_seed = int(random.randint(0, 2147483647))
st.session_state.seed = init_seed
else:
init_seed = st.session_state.seed
if "user_input" not in st.session_state:
st.session_state.user_input = "An açai bowl"
user_input = st.text_input(
    "A text prompt to generate your image",
    st.session_state.user_input,
    help="The text you type here will be used by the AI to generate an image. If you want multiple prompts, you can separate them with a `|`, so `sun|moon` will try to utilize both. If you want them to be weighted differently, you can use `:` with the syntax `sun:1|moon:2`, where `moon` will have 2x the weight of `sun`",
)
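# Callback for the enhancer buttons below: appends the clicked enhancer text
# to the current prompt.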
def add_to_prompt(text):
    global user_input
    st.session_state.user_input = user_input + " " + text
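# on_change callback for the width/height widgets: snaps the typed value to
# the nearest multiple of 64 (presumably because the underlying models expect
# dimensions divisible by 64) and notifies the user via the given placeholder.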
def dimensions_compatibility(type, after):
    if type == "width":
        dimension = st.session_state.width
    elif type == "height":
        dimension = st.session_state.height
    if dimension % 64 != 0:
        dimension = 64 * round(dimension / 64)
        if type == "width":
            st.session_state.width = dimension
        elif type == "height":
            st.session_state.height = dimension
        after.info("We rounded your value to a multiple of 64")
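# A minimal sketch of how this callback might be wired up (hypothetical names;
# the real width/height widgets are defined elsewhere in this script):
# info_area = st.empty()
# st.number_input("Width", key="width", on_change=dimensions_compatibility, args=("width", info_area))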
enhancers = st.expander("Prompt enhancers (optional)")
with enhancers:
    st.write(
        "Adding enhancers to your prompt can produce very different generated images; the few below are some examples that can produce interesting results. Here you can learn more about Prompt Engineering",
        unsafe_allow_html=True,
    )
    with st.container():
        listofenhancers = [
            "trending on artstation",
            "in ukiyo-e style",
            ", oil on canvas",
            "in chinese watercolor style",
            "in art deco style",
            "8k resolution",
            "unreal engine 4k",
            "cgsociety",
            ", pencil sketch",
            ", children's drawing",
            "by Van Gogh",
            "by Dan Mumford",
            "1995",
            ", an abstract sculpture",
            " anime style",
            "in soviet propaganda style",
            ", persian miniature painting",
            "lens flare",
            "high quality",
        ]
        for enhancer in listofenhancers:
            st.button(enhancer, on_click=add_to_prompt, args=(enhancer,))
# Soon add a media input for image/audio as prompts
# col2_input.write('and/or')
# user_input_image = col3_input.file_uploader("A media prompt (image or audio file)",type=["png","jpg","wav"],help="Media prompts only work with VQGAN-CLIP for now (Guided Diffusion coming soon)")
# st.write('', unsafe_allow_html=True)


def run_internal(args, status, stoutput, gray_during_execution):
    # Gray out the interface while a generation is in progress
    gray_during_execution.markdown(
        '',
        unsafe_allow_html=True,
    )
    model.run_model(args, status, stoutput, DefaultPaths)
    # Restore the interface once the run finishes
    gray_during_execution.markdown(
        '',
        unsafe_allow_html=True,
    )
col_output1, col_output2 = st.columns(2)
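# Decide where intermediate frames go: a dedicated per-run folder when the
# user asked to keep frames, otherwise no folder and a preview update every
# 10 steps.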
def intermediary_frame_setup(seed):
    if intermediary_frames:
        intermediary_folder = f"{DefaultPaths.output_path}/{sanitize_filename(user_input)} [{sub_model}] {int(seed)}_frames"
        if not path_exists(intermediary_folder):
            os.makedirs(intermediary_folder)
        update_every = how_many_frames
    else:
        intermediary_folder = None
        update_every = 10
    return intermediary_folder, update_every
gray_during_execution = st.empty()
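# Main execution path: runs when the user presses the submit button.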
if submit:
    meta_status = col_output1.empty()
    status = col_output1.empty()
    if uploaded_file is not None:
        # Persist the uploaded seed image to disk so the models can read it
        image_data = uploaded_file.read()
        with open(uploaded_file.name, "wb") as f:
            f.write(image_data)
        image_path = uploaded_file.name
    else:
        image_path = None
    intermediary_folder, update_every = intermediary_frame_setup(seed)
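    # Assemble the argparse.Namespace the selected backend expects; only the
    # CLIP models the user ticked are passed through.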
    if page == "VQGAN+CLIP":
        args = argparse.Namespace(
            prompt=user_input,
            seed=int(seed),
            sizex=width,
            sizey=height,
            flavor=flavor,
            iterations=num_steps,
            mse=True,
            update=update_every,
            template=template,
            clip_model_1="ViT-B/32" if "ViT-B/32" in clip_model else None,
            clip_model_2="ViT-B/16" if "ViT-B/16" in clip_model else None,
            clip_model_3="ViT-L/14" if "ViT-L/14" in clip_model else None,
            clip_model_4="RN50x64" if "RN50x64" in clip_model else None,
            clip_model_5="RN50x16" if "RN50x16" in clip_model else None,
            clip_model_6="RN50x4" if "RN50x4" in clip_model else None,
            clip_model_7="RN50" if "RN50" in clip_model else None,
            clip_model_8="RN101" if "RN101" in clip_model else None,
            vqgan_model=vqgan_model,
            seed_image=image_path,
            image_file="progress.png",
            frame_dir=intermediary_folder,
            sub_model=sub_model,
        )
    elif page == "CLIP Guided Diffusion":
        args = argparse.Namespace(
            prompt=user_input,
            seed=int(seed),
            iterations=num_steps,
            update=update_every,
            sizex=width,
            sizey=height,
            cutn=cutouts,
            cutnbatches=cutout_batches,
            tvscale=tv_scale,
            rangescale=range_scale,
            guidancescale=guidance_scale,
            saturationscale=sat_scale,
            skipseedtimesteps=skipseedtimesteps,
            usevit32="ViT-B/32" in clip_model,
            usevit16="ViT-B/16" in clip_model,
            usevit14="ViT-L/14" in clip_model,
            usern50x4="RN50x4" in clip_model,
            usern50x16="RN50x16" in clip_model,
            usern50x64="RN50x64" in clip_model,
            usern50="RN50" in clip_model,
            usern101="RN101" in clip_model,
            useslipbase=False,
            usesliplarge=False,
            use256=False,
            denoised=False,
            useaugs=use_augs,
            secondarymodel=secondary_model,
            clampmax=clamp_max,
            dango=True,
            # ddim=False,
            sampling_mode=sampling_mode,
            eta=eta,
            image_file="progress.png",
            init_image=None,
            clip_model=clip_model,
            image_prompts=[],
            seed_image=image_path,
            initscale=initscale,
            frame_dir=intermediary_folder,
            animation_mode="None",
            max_frames=10000,
            angle="0:(0)",
            zoom="0: (1), 10: (1.05)",
            translation_x="0: (0)",
            translation_y="0: (0)",
            translation_z="0: (10.0)",
            rotation_3d_x="0: (0)",
            rotation_3d_y="0: (0)",
            rotation_3d_z="0: (0)",
            midas_weight=0.3,
            near_plane=200,
            far_plane=10000,
            fov=40,
            frames_scale=1500,
            frames_skip_steps="60%",
            turbo_mode=False,
            turbo_steps=3,
            perlin_init=0,
            perlin_mode="mixed",
            sub_model=sub_model,
        )
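    # Run the (possibly batched) generation; Streamlit raises StopException
    # when the user interrupts a run mid-execution.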
    try:
        if how_many_runs > 1:
            if batch_folder:
                DefaultPaths.output_path = f"{DefaultPaths.output_path}/{batch_folder}"
            intermediary_folder, update_every = intermediary_frame_setup(args.seed)
        for i in range(how_many_runs):
            if how_many_runs > 1:
                meta_status.write(f"Running batch, {i+1} out of {how_many_runs}")
            if i > 0:
                if randomize_seed:
                    args.seed = random.randint(0, 2147483647)
                intermediary_folder, update_every = intermediary_frame_setup(args.seed)
                args.frame_dir = intermediary_folder
            run_internal(args, status, col_output1, gray_during_execution)
            gc.collect()
            torch.cuda.empty_cache()
    except st.script_runner.StopException:
        # The user stopped the run: clear the status area, free GPU memory,
        # and restore the grayed-out interface
        status.empty()
        gc.collect()
        torch.cuda.empty_cache()
        gray_during_execution.markdown(
            '',
            unsafe_allow_html=True,
        )
    init_seed = int(random.randint(0, 2147483647))
    st.session_state.seed = init_seed
    meta_status.empty()
    st.experimental_rerun()
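# Right-hand column: show the most recent creation (from Drive when mounted,
# otherwise the local progress.png) alongside the privacy note.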
with col_output2:
    gallery_text_area = st.empty()
    gallery_image_area = st.empty()
    if DefaultPaths.is_drive:
        output_folder = f"{DefaultPaths.drive_path}/outputs"
        if os.listdir(output_folder):
            files_path = os.path.join(output_folder, "*.png")
            files = sorted(glob.iglob(files_path), key=os.path.getctime, reverse=True)
            gallery_text_area.write("Welcome back! Your last creation:")
            gallery_image_area.image(Image.open(files[0]))
        st.write(
            "We do not collect prompts or results. Your creations don't belong to MindsEye. Read our FAQ. Feel free to reference #MindsEye and tag @multimodalart when sharing your creations if you wish",
            unsafe_allow_html=True,
        )
    else:
        st.write(
            "We do not collect prompts or results. Your creations don't belong to MindsEye. Read our FAQ. Feel free to reference #MindsEye and tag @multimodalart when sharing your creations if you wish",
            unsafe_allow_html=True,
        )
        if os.path.exists("progress.png"):
            gallery_text_area.write("Your last creation:")
            gallery_image_area.image(Image.open("progress.png"))
footer = """
"""
st.markdown(footer, unsafe_allow_html=True)