{ "_name_or_path": "LinWeizheDragon/PreFLMR_ViT-L", "architectures": [ "FLMRModelForRetrieval" ], "auto_map": { "AutoConfig": "configuration_flmr.FLMRConfig", "AutoModel": "modeling_flmr.FLMRModelForRetrieval" }, "context_concat_output_from_text_encoder": true, "context_concat_output_from_vision_encoder": false, "dim": 128, "initializer_range": 0.02, "load_cpu_extension": false, "mapping_network_prefix_length": 32, "mask_instruction_token": ":", "mask_punctuation": true, "model_type": "flmr", "query_concat_output_from_text_encoder": true, "query_concat_output_from_vision_encoder": true, "separate_query_and_context_text_encoder": true, "separate_query_and_context_vision_encoder": false, "text_config": { "architectures": [ "BertForMaskedLM" ], "gradient_checkpointing": false, "model_type": "flmr_text_model", "use_cache": true }, "torch_dtype": "float32", "transformer_mapping_config_base": "bert-base-uncased", "transformer_mapping_cross_attention_length": 32, "transformer_mapping_num_hidden_layers": 1, "transformers_version": "4.37.2", "use_transformer_mapping_network": true, "use_vision_encoder": true, "vision_config": { "dropout": 0.0, "hidden_size": 1024, "intermediate_size": 4096, "model_type": "flmr_vision_model", "num_attention_heads": 16, "num_hidden_layers": 24, "patch_size": 14, "projection_dim": 768 }, "vision_model_version": "openai/clip-vit-large-patch14" }