andreped committed
Commit
4d5e2b0
1 Parent(s): b1a0ecc

Disable GPU for app + moved CPU disable logic to inside run_analysis method

Files changed (3)
  1. demo/app.py +1 -1
  2. livermask/livermask.py +0 -20
  3. livermask/utils/run.py +17 -0
demo/app.py CHANGED
@@ -29,7 +29,7 @@ def nifti_to_glb(path):
 def run_model(input_path):
     from livermask.utils.run import run_analysis

-    run_analysis(cpu=False, extension='.nii', path=input_path, output='prediction', verbose=True, vessels=False, name="./model.h5")
+    run_analysis(cpu=True, extension='.nii', path=input_path, output='prediction', verbose=True, vessels=False, name="./model.h5")

     #cmd_docker = ["python3", "-m", "livermask.livermask", "--input", input_path, "--output", "prediction", "--verbose"]
     #sp.check_call(cmd_docker, shell=True)  # @FIXME: shell=True here is not optimal -> starts a shell after calling script
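With cpu=True, the demo now relies on run_analysis to hide all GPUs from TensorFlow before inference (see livermask/utils/run.py below). A quick, hypothetical sanity check (not part of the commit) is to ask TensorFlow which GPUs remain visible after the call:

import tensorflow as tf

# After run_analysis(cpu=True, ...) has run, no GPU should remain visible
# to TensorFlow in the app process.
assert tf.config.get_visible_devices("GPU") == []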
livermask/livermask.py CHANGED
@@ -25,26 +25,6 @@ def main():
     ret = parser.parse_args(sys.argv[1:])
     print(ret)

-    # only now do we call tensorflow, if necessary (to avoid redundant imports for livermask -h call)
-    import tensorflow as tf
-
-    if ret.cpu:
-        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
-    if not tf.test.is_gpu_available():
-        tf.config.set_visible_devices([], 'GPU')
-        visible_devices = tf.config.get_visible_devices()
-    else:
-        gpus = tf.config.experimental.list_physical_devices('GPU')
-        try:
-            # Currently, memory growth needs to be the same across GPUs
-            for gpu in gpus:
-                tf.config.experimental.set_memory_growth(gpu, enable=True)
-            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
-            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
-        except RuntimeError as e:
-            # Memory growth must be set before GPUs have been initialized
-            print(e)
-
     if ret.input is None:
         raise ValueError("Please, provide an input.")
     if ret.output is None:
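Dropping this block from main() leaves the device setup to run_analysis, which receives the same cpu flag (see livermask/utils/run.py below), so command-line behavior should be unchanged. One side effect: the lazy import tensorflow as tf that kept livermask -h fast is gone from this file, and the lines added in run.py use tf directly, which implies that module already imports TensorFlow at import time. A hypothetical way to preserve the lazy import (not what this commit does) would be to defer it inside run_analysis, sketched here with a simplified signature:

def run_analysis(path, output, cpu, verbose, vessels, extension, name=None):
    # Deferred import (hypothetical): only paid when an analysis actually runs,
    # so `livermask -h` stays free of heavy TensorFlow imports.
    import tensorflow as tf
    ...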
livermask/utils/run.py CHANGED
@@ -29,6 +29,23 @@ def run_analysis(path, output, cpu, verbose, vessels, extension, name=None, name
     path = path.replace("\\", "/")
     output = output.replace("\\", "/")

+    if cpu:
+        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+    if not tf.test.is_gpu_available():
+        tf.config.set_visible_devices([], 'GPU')
+        visible_devices = tf.config.get_visible_devices()
+    else:
+        gpus = tf.config.experimental.list_physical_devices('GPU')
+        try:
+            # Currently, memory growth needs to be the same across GPUs
+            for gpu in gpus:
+                tf.config.experimental.set_memory_growth(gpu, enable=True)
+            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
+            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
+        except RuntimeError as e:
+            # Memory growth must be set before GPUs have been initialized
+            print(e)
+
     # enable verbose or not
     log = verboseHandler(verbose)
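For reference, a minimal, self-contained sketch of the device-selection pattern that now lives in run_analysis, assuming TensorFlow 2.x. configure_devices is a hypothetical helper name, not part of livermask; the sketch also swaps the deprecated tf.test.is_gpu_available() for tf.config.list_physical_devices('GPU'), and sets CUDA_VISIBLE_DEVICES before importing TensorFlow, since the variable is only honored before CUDA has been initialized:

import os

def configure_devices(cpu: bool) -> None:
    if cpu:
        # Hide every CUDA device from TensorFlow (and anything else that
        # reads this variable). Must be set before CUDA is initialized.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    import tensorflow as tf

    gpus = tf.config.list_physical_devices("GPU")
    if not gpus:
        # No usable GPU (or all hidden above): make the CPU-only setup explicit.
        tf.config.set_visible_devices([], "GPU")
    else:
        try:
            # Memory growth must be set uniformly across GPUs, and before
            # the GPUs have been initialized.
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            print(e)

configure_devices(cpu=True)  # e.g. what the demo app now effectively requests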