molo322 committed
Commit efdfca5
1 Parent(s): baab495

Upload config.py with huggingface_hub

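The commit message names huggingface_hub; a minimal sketch of the kind of call that produces a commit like this one (the repo_id below is a hypothetical placeholder, not taken from this page):

from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="config.py",        # local file to upload
    path_in_repo="config.py",           # destination path inside the repo
    repo_id="molo322/example-repo",     # hypothetical repo id
    commit_message="Upload config.py with huggingface_hub",
)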
Files changed (1)
  1. config.py +109 -0
config.py ADDED
@@ -0,0 +1,109 @@
import argparse
from multiprocessing import cpu_count

import torch


class Config:
    def __init__(self):
        self.device = "cuda:0"
        self.is_half = True
        self.n_cpu = 0
        self.gpu_name = None
        self.gpu_mem = None
        (
            self.python_cmd,
            self.listen_port,
            self.iscolab,
            self.noparallel,
            self.noautoopen,
        ) = self.arg_parse()
        self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()

    def arg_parse(self) -> tuple:
        parser = argparse.ArgumentParser()
        parser.add_argument("--port", type=int, default=7865, help="Listen port")
        parser.add_argument(
            "--pycmd", type=str, default="python", help="Python command"
        )
        parser.add_argument("--colab", action="store_true", help="Launch in colab")
        parser.add_argument(
            "--noparallel", action="store_true", help="Disable parallel processing"
        )
        parser.add_argument(
            "--noautoopen",
            action="store_true",
            help="Do not open in browser automatically",
        )
        cmd_opts = parser.parse_args()

        # Fall back to the default port if the requested one is out of range
        cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865

        return (
            cmd_opts.pycmd,
            cmd_opts.port,
            cmd_opts.colab,
            cmd_opts.noparallel,
            cmd_opts.noautoopen,
        )

    def device_config(self) -> tuple:
        if torch.cuda.is_available():
            i_device = int(self.device.split(":")[-1])
            self.gpu_name = torch.cuda.get_device_name(i_device)
            if (
                "16" in self.gpu_name
                or "P40" in self.gpu_name.upper()
                or "1070" in self.gpu_name
                or "1080" in self.gpu_name
            ):
                print("Forcing single precision for 16-series GPUs")
                self.is_half = False
                for config_file in ["32k.json", "40k.json", "48k.json"]:
                    # Read first, then rewrite: a file opened in append
                    # mode cannot be read from
                    with open(f"configs/{config_file}", "r") as f:
                        strr = f.read().replace("true", "false")
                    with open(f"configs/{config_file}", "w") as f:
                        f.write(strr)
            # Total VRAM in GiB, rounded via the 0.4 offset
            self.gpu_mem = int(
                torch.cuda.get_device_properties(i_device).total_memory
                / 1024
                / 1024
                / 1024
                + 0.4
            )
            if self.gpu_mem <= 4:
                # On low-VRAM GPUs, lower the 3.7 threshold in the
                # preprocessing script to 3.0
                with open("trainset_preprocess_pipeline_print.py", "r") as f:
                    strr = f.read().replace("3.7", "3.0")
                with open("trainset_preprocess_pipeline_print.py", "w") as f:
                    f.write(strr)
        elif torch.backends.mps.is_available():
            print("No supported NVIDIA GPU found, using MPS for inference")
            self.device = "mps"
        else:
            print("No supported NVIDIA GPU found, using CPU for inference")
            self.device = "cpu"

        if self.n_cpu == 0:
            self.n_cpu = cpu_count()

        if self.is_half:
            # Settings for 6 GB of VRAM
            x_pad = 3
            x_query = 10
            x_center = 60
            x_max = 65
        else:
            # Settings for 5 GB of VRAM
            x_pad = 1
            x_query = 6
            x_center = 38
            x_max = 41

        # Tighten the windows further on GPUs with 4 GB of VRAM or less
        if self.gpu_name is not None and self.gpu_mem is not None and self.gpu_mem <= 4:
            x_pad = 1
            x_query = 5
            x_center = 30
            x_max = 32

        return x_pad, x_query, x_center, x_max
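A minimal usage sketch, assuming the configs/ directory and the preprocessing script referenced above exist where the low-VRAM branches need them:

if __name__ == "__main__":
    config = Config()  # parses CLI args and probes the available device
    print("device:", config.device, "| half precision:", config.is_half)
    print(
        "x_pad/x_query/x_center/x_max:",
        config.x_pad, config.x_query, config.x_center, config.x_max,
    )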