{
    "api_key": null,
    "verify_url": "http://johnrachwan.pythonanywhere.com",
    "smash_config": {
        "pruners": "None",
        "pruning_ratio": 0.0,
        "factorizers": "None",
        "quantizers": "['half']",
        "n_quantization_bits": 32,
        "output_deviation": 0.01,
        "compilers": "['x-fast']",
        "static_batch": true,
        "static_shape": true,
        "controlnet": "None",
        "unet_dim": 4,
        "device": "cuda",
        "cache_dir": "/ceph/hdd/staff/charpent/.cache/models2qdiqez8",
        "batch_size": 1,
        "model_name": "vit_large_patch14_clip_224.laion2b_ft_in12k_in1k",
        "max_batch_size": 1,
        "qtype_weight": "torch.qint8",
        "qtype_activation": "torch.quint8",
        "qobserver": "<class 'torch.ao.quantization.observer.MinMaxObserver'>",
        "qscheme": "torch.per_tensor_symmetric",
        "qconfig": "x86",
        "group_size": 128,
        "damp_percent": 0.1,
        "save_dir": ".models/optimized_model",
        "fn_to_compile": "forward",
        "save_load_fn": "x-fast"
    }
}
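
A minimal sketch of reading this config, assuming it is saved as smash_config.json (a hypothetical filename); note that several fields store Python literals as strings (e.g. "['half']", "None") rather than native JSON values, so consumers must parse them accordingly.

import json

with open("smash_config.json") as f:  # hypothetical filename for the config above
    config = json.load(f)

smash = config["smash_config"]
# These values are strings encoding Python literals, not JSON lists/nulls:
print(smash["quantizers"])   # "['half']"
print(smash["compilers"])    # "['x-fast']"
print(smash["pruners"])      # "None" (the string, not JSON null)
print(smash["device"])       # "cuda"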