{
    "name": "cuda_inference_transformers_token-classification_microsoft/deberta-v3-base",
    "backend": {
        "name": "pytorch",
        "version": "2.4.0+cu121",
        "_target_": "optimum_benchmark.backends.pytorch.backend.PyTorchBackend",
        "task": "token-classification",
        "library": "transformers",
        "model_type": "deberta-v2",
        "model": "microsoft/deberta-v3-base",
        "processor": "microsoft/deberta-v3-base",
        "device": "cuda",
        "device_ids": "0",
        "seed": 42,
        "inter_op_num_threads": null,
        "intra_op_num_threads": null,
        "model_kwargs": {},
        "processor_kwargs": {},
        "no_weights": true,
        "device_map": null,
        "torch_dtype": null,
        "eval_mode": true,
        "to_bettertransformer": false,
        "low_cpu_mem_usage": null,
        "attn_implementation": null,
        "cache_implementation": null,
        "autocast_enabled": false,
        "autocast_dtype": null,
        "torch_compile": false,
        "torch_compile_target": "forward",
        "torch_compile_config": {},
        "quantization_scheme": null,
        "quantization_config": {},
        "deepspeed_inference": false,
        "deepspeed_inference_config": {},
        "peft_type": null,
        "peft_config": {}
    },
    "scenario": {
        "name": "inference",
        "_target_": "optimum_benchmark.scenarios.inference.scenario.InferenceScenario",
        "iterations": 1,
        "duration": 1,
        "warmup_runs": 1,
        "input_shapes": {
            "batch_size": 1,
            "num_choices": 2,
            "sequence_length": 2
        },
        "new_tokens": null,
        "memory": true,
        "latency": true,
        "energy": true,
        "forward_kwargs": {},
        "generate_kwargs": {
            "max_new_tokens": 2,
            "min_new_tokens": 2
        },
        "call_kwargs": {
            "num_inference_steps": 2
        }
    },
    "launcher": {
        "name": "process",
        "_target_": "optimum_benchmark.launchers.process.launcher.ProcessLauncher",
        "device_isolation": true,
        "device_isolation_action": "error",
        "numactl": false,
        "numactl_kwargs": {},
        "start_method": "spawn"
    },
    "environment": {
        "cpu": " AMD EPYC 7R32",
        "cpu_count": 16,
        "cpu_ram_mb": 66697.25696,
        "system": "Linux",
        "machine": "x86_64",
        "platform": "Linux-5.10.220-209.869.amzn2.x86_64-x86_64-with-glibc2.35",
        "processor": "x86_64",
        "python_version": "3.10.12",
        "gpu": [
            "NVIDIA A10G"
        ],
        "gpu_count": 1,
        "gpu_vram_mb": 24146608128,
        "optimum_benchmark_version": "0.4.0",
        "optimum_benchmark_commit": null,
        "transformers_version": "4.44.0",
        "transformers_commit": null,
        "accelerate_version": "0.33.0",
        "accelerate_commit": null,
        "diffusers_version": "0.30.0",
        "diffusers_commit": null,
        "optimum_version": null,
        "optimum_commit": null,
        "timm_version": "1.0.8",
        "timm_commit": null,
        "peft_version": null,
        "peft_commit": null
    }
}