megaaziib committed
Commit 0abf475
Parent: d324b65

add config

Files changed (1):
  1. config.py +106 -0
config.py ADDED
@@ -0,0 +1,106 @@
+import argparse
+import torch
+from multiprocessing import cpu_count
+
+class Config:
+    def __init__(self):
+        self.device = "cuda:0"
+        self.is_half = True
+        self.n_cpu = 0
+        self.gpu_name = None
+        self.gpu_mem = None
+        (
+            self.python_cmd,
+            self.listen_port,
+            self.colab,
+            self.noparallel,
+            self.noautoopen,
+            self.api,
+        ) = self.arg_parse()
+        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
+
+    @staticmethod
+    def arg_parse() -> tuple:
+        parser = argparse.ArgumentParser()
+        parser.add_argument("--port", type=int, default=7865, help="Listen port")
+        parser.add_argument(
+            "--pycmd", type=str, default="python", help="Python command"
+        )
+        parser.add_argument("--colab", action="store_true", help="Launch in colab")
+        parser.add_argument(
+            "--noparallel", action="store_true", help="Disable parallel processing"
+        )
+        parser.add_argument(
+            "--noautoopen",
+            action="store_true",
+            help="Do not open in browser automatically",
+        )
+        parser.add_argument("--api", action="store_true", help="Launch with api")
+        cmd_opts = parser.parse_args()
+
+        # Fall back to the default when the requested port is out of range
+        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
+
+        return (
+            cmd_opts.pycmd,
+            cmd_opts.port,
+            cmd_opts.colab,
+            cmd_opts.noparallel,
+            cmd_opts.noautoopen,
+            cmd_opts.api,
+        )
+
+    def device_config(self) -> tuple:
+        if torch.cuda.is_available():
+            i_device = int(self.device.split(":")[-1])
+            self.gpu_name = torch.cuda.get_device_name(i_device)
+            if (
+                ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
+                or "P40" in self.gpu_name.upper()
+                or "1060" in self.gpu_name
+                or "1070" in self.gpu_name
+                or "1080" in self.gpu_name
+            ):
+                # These cards have poor fp16 support, so force single precision
+                print("16-series/10-series GPUs and P40 are forced to single precision")
+                self.is_half = False
+            else:
+                self.gpu_name = None
+            self.gpu_mem = int(
+                torch.cuda.get_device_properties(i_device).total_memory
+                / 1024
+                / 1024
+                / 1024
+                + 0.4
+            )
+        elif torch.backends.mps.is_available():
+            print("No supported NVIDIA GPU found, using MPS for inference")
+            self.device = "mps"
+            self.is_half = False
+        else:
+            print("No supported NVIDIA GPU found, using CPU for inference")
+            self.device = "cpu"
+            self.is_half = False
+
+        if self.n_cpu == 0:
+            self.n_cpu = cpu_count()
+
+        if self.is_half:
+            # Settings for 6 GB of VRAM
+            x_pad = 3
+            x_query = 10
+            x_center = 60
+            x_max = 65
+        else:
+            # Settings for 5 GB of VRAM
+            x_pad = 1
+            x_query = 6
+            x_center = 38
+            x_max = 41
+
+        if self.gpu_mem is not None and self.gpu_mem <= 4:
+            x_pad = 1
+            x_query = 5
+            x_center = 30
+            x_max = 32
+
+        return x_pad, x_query, x_center, x_max
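
Taken together, arg_parse defines the launcher's command-line surface. A hypothetical invocation might look like the following; the entry-point script name is a placeholder, since no entry point is part of this commit:

    python infer-web.py --port 7897 --noparallel --api

Note that any port outside 0-65535 is silently replaced by the default 7865 rather than raising an error.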
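The + 0.4 in device_config nudges the gibibyte value up before int() truncates it, so a card whose driver reports slightly less than its nominal VRAM still rounds to the marketing figure. A small worked example (the byte count is illustrative, not from the source):

    # Illustrative only: roughly what an 11 GB card might report for total_memory.
    total_memory = 11_554_717_696  # bytes, a bit under 11 GiB
    gpu_mem = int(total_memory / 1024 / 1024 / 1024 + 0.4)  # 10.76 + 0.4 -> truncates to 11
    print(gpu_mem)  # 11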
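Finally, a minimal sketch of how downstream code might consume the resulting Config. The toy model below is a hypothetical stand-in, not the real voice-conversion pipeline, which is not part of this commit:

    import torch

    from config import Config

    config = Config()  # parses CLI flags and probes the hardware once at startup

    # is_half picks the dtype; device picks where tensors live.
    model = torch.nn.Linear(4, 4)  # hypothetical stand-in for the real model
    model = model.half() if config.is_half else model.float()
    model = model.to(config.device)

    # x_pad / x_query / x_center / x_max bound how audio is chunked so that
    # inference stays within the detected VRAM budget.
    print(config.device, config.is_half, config.x_pad, config.x_query, config.x_center, config.x_max)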