{ "api_key": null, "verify_url": "https://johnrachwan.pythonanywhere.com", "smash_config": { "pruners": "None", "pruning_ratio": 0.0, "factorizers": "None", "quantizers": "['half']", "n_quantization_bits": 32, "output_deviation": 0.01, "compilers": "['x-fast']", "static_batch": true, "static_shape": true, "controlnet": "None", "unet_dim": 4, "device": "cuda", "cache_dir": "/ceph/hdd/staff/charpent/.cache/modelsh9g6_hpn", "batch_size": 1, "model_name": "beitv2_large_patch16_224.in1k_ft_in1k", "max_batch_size": 1, "qtype_weight": "torch.qint8", "qtype_activation": "torch.quint8", "qobserver": "", "qscheme": "torch.per_tensor_symmetric", "qconfig": "x86", "group_size": 128, "damp_percent": 0.1, "save_dir": ".models/optimized_model", "fn_to_compile": "forward", "save_load_fn": "x-fast" } }