andreped committed on
Commit
ec64097
1 Parent(s): 7fc16e4

Download models in Dockerfile + revert request changes

Browse files
Files changed (4) hide show
  1. Dockerfile +4 -4
  2. demo/app.py +1 -1
  3. livermask/utils/fetch.py +1 -5
  4. livermask/utils/run.py +12 -10
Dockerfile CHANGED
@@ -22,6 +22,10 @@ WORKDIR /code
22
  RUN apt-get update -y
23
  RUN apt install git --fix-missing -y
24
 
 
 
 
 
25
  # install dependencies
26
  COPY ./demo/requirements.txt /code/demo/requirements.txt
27
  RUN python3.7 -m pip install --no-cache-dir --upgrade -r /code/demo/requirements.txt
@@ -29,10 +33,6 @@ RUN python3.7 -m pip install --no-cache-dir --upgrade -r /code/demo/requirements
29
  # resolve issue with tf==2.4 and gradio dependency collision issue
30
  RUN python3.7 -m pip install --force-reinstall typing_extensions==4.0.0
31
 
32
- # allow container to access outside world
33
- RUN apt-get update && apt install iptables -y
34
- RUN sysctl net.ipv4.conf.all.forwarding=1
35
-
36
  # Set up a new user named "user" with user ID 1000
37
  RUN useradd -m -u 1000 user
38
 
 
22
  RUN apt-get update -y
23
  RUN apt install git --fix-missing -y
24
 
25
+ # Download pretrained parenchyma model
26
+ RUN wget "https://github.com/andreped/livermask/releases/download/trained-models-v1/model.h5"
27
+ COPY ./model.h5 /code/model.h5
28
+
29
  # install dependencies
30
  COPY ./demo/requirements.txt /code/demo/requirements.txt
31
  RUN python3.7 -m pip install --no-cache-dir --upgrade -r /code/demo/requirements.txt
 
33
  # resolve issue with tf==2.4 and gradio dependency collision issue
34
  RUN python3.7 -m pip install --force-reinstall typing_extensions==4.0.0
35
 
 
 
 
 
36
  # Set up a new user named "user" with user ID 1000
37
  RUN useradd -m -u 1000 user
38
 
demo/app.py CHANGED
@@ -29,7 +29,7 @@ def nifti_to_glb(path):
29
  def run_model(input_path):
30
  from livermask.utils.run import run_analysis
31
 
32
- run_analysis(cpu=False, extension='.nii', path=input_path, output='prediction', verbose=True, vessels=False)
33
 
34
  #cmd_docker = ["python3", "-m", "livermask.livermask", "--input", input_path, "--output", "prediction", "--verbose"]
35
  #sp.check_call(cmd_docker, shell=True) # @FIXME: shell=True here is not optimal -> starts a shell after calling script
 
29
  def run_model(input_path):
30
  from livermask.utils.run import run_analysis
31
 
32
+ run_analysis(cpu=False, extension='.nii', path=input_path, output='prediction', verbose=True, vessels=False, name="./model.h5")
33
 
34
  #cmd_docker = ["python3", "-m", "livermask.livermask", "--input", input_path, "--output", "prediction", "--verbose"]
35
  #sp.check_call(cmd_docker, shell=True) # @FIXME: shell=True here is not optimal -> starts a shell after calling script
livermask/utils/fetch.py CHANGED
@@ -12,11 +12,7 @@ def download(url, destination_file):
12
  mtime = os.path.getmtime(destination_file)
13
  headers["if-modified-since"] = formatdate(mtime, usegmt=True)
14
 
15
- # relevant for docker
16
- session = requests.Session()
17
- session.trust_env = False
18
-
19
- response = session.get(url, headers=headers, stream=True)
20
  response.raise_for_status()
21
 
22
  if response.status_code == requests.codes.not_modified:
 
12
  mtime = os.path.getmtime(destination_file)
13
  headers["if-modified-since"] = formatdate(mtime, usegmt=True)
14
 
15
+ response = requests.get(url, headers=headers, stream=True)
 
 
 
 
16
  response.raise_for_status()
17
 
18
  if response.status_code == requests.codes.not_modified:
livermask/utils/run.py CHANGED
@@ -24,22 +24,24 @@ import logging as log
24
  from .utils import get_model, get_vessel_model
25
 
26
 
27
- def run_analysis(path, output, cpu, verbose, vessels, extension):
28
  # fix paths (necessary if called as a package and not CLI)
29
  path = path.replace("\\", "/")
30
  output = output.replace("\\", "/")
31
 
32
  # enable verbose or not
33
  log = verboseHandler(verbose)
34
-
 
 
35
  cwd = "/".join(os.path.realpath(__file__).replace("\\", "/").split("/")[:-1]) + "/"
36
- name = cwd + "model.h5"
37
- name_vessel = cwd + "model-hepatic_vessel.npz"
38
-
39
- # get models
40
- get_model(name)
41
-
42
- if vessels:
43
  get_vessel_model(name_vessel)
44
 
45
  if not os.path.isdir(path):
@@ -61,4 +63,4 @@ def run_analysis(path, output, cpu, verbose, vessels, extension):
61
  # perform liver vessel segmentation
62
  vessel_segmenter(curr, output, cpu, verbose, multiple_flag, pred, name_vessel, extension)
63
  else:
64
- log.info("Unsuported file: " + curr)
 
24
  from .utils import get_model, get_vessel_model
25
 
26
 
27
+ def run_analysis(path, output, cpu, verbose, vessels, extension, name=None, name_vessel=None):
28
  # fix paths (necessary if called as a package and not CLI)
29
  path = path.replace("\\", "/")
30
  output = output.replace("\\", "/")
31
 
32
  # enable verbose or not
33
  log = verboseHandler(verbose)
34
+
35
+ # if model names are not provided, download them (necessary for docker,
36
+ # where we cannot perform HTTP requests from inside container)
37
  cwd = "/".join(os.path.realpath(__file__).replace("\\", "/").split("/")[:-1]) + "/"
38
+ log.info("Model names: " + str(name) + ", " + str(name_vessel))
39
+ if name is None:
40
+ name = cwd + "model.h5"
41
+ get_model(name)
42
+
43
+ if vessels and name_vessel is None:
44
+ name_vessel = cwd + "model-hepatic_vessel.npz"
45
  get_vessel_model(name_vessel)
46
 
47
  if not os.path.isdir(path):
 
63
  # perform liver vessel segmentation
64
  vessel_segmenter(curr, output, cpu, verbose, multiple_flag, pred, name_vessel, extension)
65
  else:
66
+ log.info("Unsupported file: " + curr)