{"model_type": "stable-diffusion", "text_encoder": {"architectures": ["CLIPTextModel"], "attention_dropout": 0.0, "dropout": 0.0, "hidden_act": "quick_gelu", "hidden_size": 768, "initializer_factor": 1.0, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-05, "max_position_embeddings": 77, "model_type": "clip_text_model", "neuron": {"auto_cast": "matmul", "auto_cast_type": "bf16", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "dynamic_batch_size": false, "inline_weights_to_neff": false, "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "static_batch_size": 1, "static_sequence_length": 77}, "num_attention_heads": 12, "num_hidden_layers": 12, "task": "feature-extraction", "vocab_size": 49408}, "unet": {"_class_name": "UNet2DConditionModel", "act_fn": "silu", "addition_embed_type": null, "addition_embed_type_num_heads": 64, "addition_time_embed_dim": null, "attention_head_dim": 8, "attention_type": "default", "block_out_channels": [320, 640, 1280, 1280], "center_input_sample": false, "class_embed_type": null, "class_embeddings_concat": false, "conv_in_kernel": 3, "conv_out_kernel": 3, "cross_attention_dim": 768, "cross_attention_norm": null, "down_block_types": ["CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"], "downsample_padding": 1, "dropout": 0.0, "dual_cross_attention": false, "encoder_hid_dim": null, "encoder_hid_dim_type": null, "flip_sin_to_cos": true, "freq_shift": 0, "in_channels": 4, "layers_per_block": 2, "mid_block_only_cross_attention": null, "mid_block_scale_factor": 1, "mid_block_type": "UNetMidBlock2DCrossAttn", "neuron": {"auto_cast": "matmul", "auto_cast_type": "bf16", "compiler_type": "neuronx-cc", "compiler_version": "2.13.66.0+6dfecc895", "dynamic_batch_size": false, "inline_weights_to_neff": false, "optlevel": "2", "output_attentions": false, "output_hidden_states": false, "static_batch_size": 4, "static_height": 96, "static_num_channels": 4, "static_sequence_length": 77, "static_width": 96}, "norm_eps": 1e-05, "norm_num_groups": 32, "num_attention_heads": null, "num_class_embeds": null, "only_cross_attention": false, "out_channels": 4, "projection_class_embeddings_input_dim": null, "resnet_out_scale_factor": 1.0, "resnet_skip_time_act": false, "resnet_time_scale_shift": "default", "reverse_transformer_layers_per_block": null, "task": "semantic-segmentation", "time_cond_proj_dim": null, "time_embedding_act_fn": null, "time_embedding_dim": null, "time_embedding_type": "positional", "timestep_post_act": null, "transformer_layers_per_block": 1, "up_block_types": ["UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"], "upcast_attention": false, "use_linear_projection": false}}