[clip_list]
perceptors = ["[clip - mlfoundations - ViT-B-16-plus-240--laion400m_e32]", "[clip - mlfoundations - ViT-B-16--openai]", "[clip - mlfoundations - ViT-B-32--openai]"]

[basic_settings]
latent_diffusion_guidance_scale = 8.85
clip_guidance_scale = 5000
aesthetic_loss_scale = 500
augment_cuts = True

[advanced_settings]
symmetric_loss_scale = 0.8
scale_div = 0.5
opt_ddim_eta = 1.2
opt_eta_end = 1.05
opt_temperature = .95
anti_jpg = 0.3
clamp_index = [.45]*1000
cut_overview = [8]*1000
cut_innercut = [2]*500 + [2]*500
cut_ic_pow = .2
cut_icgray_p = [.1]*300 + [0]*1000
cutn_batches = 1
range_index = [0]*300 + [0]*1000
active_function = "softsign"  # function applied to the gradient; helps stabilize things
tv_scales = [45]*1 + [20]*3

# Deactivating new stuff from 1.4
score_modifier = False
compress_steps = 0
punish_steps = 0
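
# How the per-step lists above (clamp_index, cut_overview, cut_icgray_p,
# range_index, tv_scales) are consumed is not shown in this file. Below is a
# minimal Python sketch, assuming each list is indexed by the current sampling
# step and that steps past the end of a list reuse its last entry; the helper
# names schedule_value and softsign are hypothetical, not the tool's own API.

import torch

def schedule_value(schedule, step):
    # Hypothetical helper: read a per-step schedule, e.g. clamp_index = [.45]*1000
    # yields 0.45 at every step. Clamping to the last entry is an assumption
    # about how runs longer than the list are handled.
    return schedule[min(step, len(schedule) - 1)]

def softsign(grad: torch.Tensor) -> torch.Tensor:
    # active_function = "softsign": squash the guidance gradient into (-1, 1),
    # which helps stabilize the update at high guidance scales.
    return grad / (1 + grad.abs())

# Example: softsign(torch.tensor([0.5, -4.0])) -> tensor([0.3333, -0.8000]),
# so large gradient spikes are damped while small values pass nearly unchanged.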