flava-full / preprocessor_config.json
{
  "codebook_crop_size": 112,
  "codebook_do_center_crop": true,
  "codebook_do_map_pixels": true,
  "codebook_do_normalize": true,
  "codebook_do_resize": true,
  "codebook_image_mean": [
    0.0,
    0.0,
    0.0
  ],
  "codebook_image_std": [
    1.0,
    1.0,
    1.0
  ],
  "codebook_resample": 1,
  "codebook_size": 112,
  "crop_size": 224,
  "do_center_crop": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "FlavaFeatureExtractor",
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "input_size_patches": 14,
  "mask_group_max_aspect_ratio": null,
  "mask_group_max_patches": null,
  "mask_group_min_aspect_ratio": 0.3,
  "mask_group_min_patches": 16,
  "processor_class": "FlavaProcessor",
  "resample": 3,
  "size": 224,
  "total_mask_patches": 75
}
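
For context, a minimal sketch of how a config like this is typically consumed with the Transformers library: loading the processor from the Hub applies these settings (224x224 resize and center crop, CLIP-style mean/std normalization) to the image branch automatically. The checkpoint id "facebook/flava-full" and the sample image URL are assumptions chosen for illustration, not part of this file.

# Minimal usage sketch, assuming the config above is hosted under
# "facebook/flava-full" and that a recent transformers version is installed.
from PIL import Image
import requests
from transformers import FlavaProcessor

processor = FlavaProcessor.from_pretrained("facebook/flava-full")

# A publicly reachable sample image (assumed for illustration).
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# The image is resized, center-cropped to crop_size=224, and normalized
# with the image_mean/image_std values from the config; the text is
# tokenized by the paired tokenizer.
inputs = processor(
    text=["a photo of two cats"],
    images=image,
    return_tensors="pt",
    padding=True,
)
print(inputs["pixel_values"].shape)  # expected: torch.Size([1, 3, 224, 224])

# Note: if the installed version supports them, passing
# return_codebook_pixels=True and return_image_mask=True to the call above
# additionally applies the codebook_* settings (112x112 crop, identity
# normalization) and the mask_group_*/total_mask_patches settings used
# during FLAVA pretraining.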