# State dict keys and shapes for an XLabs FLUX ControlNet model. Intended to be used for unit tests.
# These keys were extracted from:
# https://huggingface.co/XLabs-AI/flux-controlnet-collections/blob/86ab1e915a389d5857135c00e0d350e9e38a9048/flux-canny-controlnet_v2.safetensors
# Per-stream layer shapes inside one double-stream block. Each double block
# repeats this layout twice — once for the image ("img") stream and once for
# the text ("txt") stream — so the 48 double-block keys are generated below
# rather than written out by hand.
_DOUBLE_BLOCK_STREAM_SHAPES = {
    "attn.norm.key_norm.scale": [128],
    "attn.norm.query_norm.scale": [128],
    "attn.proj.bias": [3072],
    "attn.proj.weight": [3072, 3072],
    "attn.qkv.bias": [9216],
    "attn.qkv.weight": [9216, 3072],
    "mlp.0.bias": [12288],
    "mlp.0.weight": [12288, 3072],
    "mlp.2.bias": [3072],
    "mlp.2.weight": [3072, 12288],
    "mod.lin.bias": [18432],
    "mod.lin.weight": [18432, 3072],
}

# Keys that appear exactly once in the checkpoint.
_shapes = {
    "img_in.bias": [3072],
    "img_in.weight": [3072, 64],
    "pos_embed_input.bias": [3072],
    "pos_embed_input.weight": [3072, 64],
    "txt_in.bias": [3072],
    "txt_in.weight": [3072, 4096],
}

# controlnet_blocks.0 / .1 share identical bias/weight shapes.
for _i in range(2):
    _shapes[f"controlnet_blocks.{_i}.bias"] = [3072]
    _shapes[f"controlnet_blocks.{_i}.weight"] = [3072, 3072]

# Two double-stream blocks, each with an image stream and a text stream.
# list(...) gives every key its own list object, matching the original
# literal dict where no shape lists were shared.
for _i in range(2):
    for _stream in ("img", "txt"):
        for _suffix, _shape in _DOUBLE_BLOCK_STREAM_SHAPES.items():
            _shapes[f"double_blocks.{_i}.{_stream}_{_suffix}"] = list(_shape)

# Embedding MLPs: in_layer/out_layer pairs differing only in input width.
for _name, _in_features in (("guidance_in", 256), ("time_in", 256), ("vector_in", 768)):
    _shapes[f"{_name}.in_layer.bias"] = [3072]
    _shapes[f"{_name}.in_layer.weight"] = [3072, _in_features]
    _shapes[f"{_name}.out_layer.bias"] = [3072]
    _shapes[f"{_name}.out_layer.weight"] = [3072, 3072]

# input_hint_block: even indices 0..14; only layer 0's weight has a
# 3-channel second dimension, the rest are [16, 16, 3, 3].
for _layer in range(0, 16, 2):
    _shapes[f"input_hint_block.{_layer}.bias"] = [16]
    _shapes[f"input_hint_block.{_layer}.weight"] = [16, 3 if _layer == 0 else 16, 3, 3]

# Sort so iteration order matches the original hand-written, string-sorted
# listing exactly (note string order: "10" sorts before "2").
xlabs_sd_shapes = dict(sorted(_shapes.items()))
