{
    "name": "cpu_inference_transformers_token-classification_microsoft/deberta-v3-base",
    "backend": {
        "name": "pytorch",
        "version": "2.4.1+cpu",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "token-classification",
        "library": "transformers",
        "model_type": "deberta-v2",
        "model": "microsoft/deberta-v3-base",
        "processor": "microsoft/deberta-v3-base",
        "device": "cpu",
        "device_ids": null,
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": false,
        "device_isolation_action": null,
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7763 64-Core Processor",
        "cpu_count": 4,
        "cpu_ram_mb": 16766.7712,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-6.8.0-1014-azure-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.15",
        "optimum_benchmark_version": "0.5.0",
        "optimum_benchmark_commit": "39d0314f9cf4240111640faabd3741cc6d1a4aa3",
        "transformers_version": "4.44.2",
        "transformers_commit": null,
        "accelerate_version": "0.34.2",
        "accelerate_commit": null,
        "diffusers_version": "0.30.3",
        "diffusers_commit": null,
        "optimum_version": "1.22.0",
        "optimum_commit": null,
        "timm_version": "1.0.9",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    }
}