Commit d3237c9 by radames (1 parent: edcf6dc)

libtcmalloc

Files changed (3):
  1. Dockerfile +3 -0
  2. app-img2img.py +1 -1
  3. app-txt2img.py +1 -1
Dockerfile CHANGED
@@ -11,7 +11,9 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
     python3-dev \
     git \
     ffmpeg \
+    google-perftools \
     && apt-get clean && rm -rf /var/lib/apt/lists/*
+
 
 WORKDIR /code
 
@@ -36,5 +38,6 @@ WORKDIR $HOME/app
 # Copy the current directory contents into the container at $HOME/app setting the owner to the user
 COPY --chown=user . $HOME/app
 
+ENV LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4
 CMD ["uvicorn", "app-img2img:app", "--host", "0.0.0.0", "--port", "7860"]
 # CMD ["uvicorn", "app-txt2img:app", "--host", "0.0.0.0", "--port", "7860"]
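
The two added lines install google-perftools and preload its tcmalloc allocator into every process the container starts; tcmalloc is commonly preloaded under long-running PyTorch servers to curb glibc-malloc fragmentation. Below is a minimal sketch for sanity-checking the preload from inside the running container; the helper is hypothetical and not part of this commit, and it assumes a Linux /proc filesystem:

# check_tcmalloc.py -- hypothetical helper, not part of this commit
import os

def tcmalloc_loaded() -> bool:
    # /proc/self/maps lists every shared object mapped into this process (Linux only).
    with open("/proc/self/maps") as maps:
        return any("libtcmalloc" in line for line in maps)

if __name__ == "__main__":
    print("LD_PRELOAD =", os.environ.get("LD_PRELOAD"))  # should echo the Dockerfile value
    print("tcmalloc loaded:", tcmalloc_loaded())         # expected: True when the preload worked

Running it with `docker exec <container> python3 check_tcmalloc.py` (container name assumed) is enough to confirm the new ENV line took effect.
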
app-img2img.py CHANGED
@@ -102,7 +102,7 @@ def predict(
 ):
     generator = torch.manual_seed(params.seed)
     # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps.
-    num_inference_steps = 3
+    num_inference_steps = 4
     results = pipe(
         prompt_embeds=prompt_embeds,
         generator=generator,
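
The hunk cuts off the pipe(...) call right after generator=generator. For orientation, here is a hedged sketch of how the raised step count typically enters a diffusers img2img call; every keyword beyond prompt_embeds and generator (image, strength, guidance_scale, output_type) is an assumption about the surrounding, unshown code, not text from this commit:

# Hypothetical continuation of the truncated call (assumed, not from this repo)
results = pipe(
    prompt_embeds=prompt_embeds,   # shown in the diff context
    generator=generator,           # shown in the diff context
    image=input_image,             # assumed: source frame for img2img
    strength=params.strength,      # assumed: how far to drift from the input
    num_inference_steps=4,         # the value this commit raises from 3
    guidance_scale=8.0,            # assumed: a common value in LCM demos
    output_type="pil",             # assumed
)

The practical point of the change is the usual LCM trade-off: each extra step adds latency roughly linearly, while going from 3 to 4 steps typically improves output fidelity and stays within the 1~8 range the comment recommends.
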
app-txt2img.py CHANGED
@@ -100,7 +100,7 @@ def predict(params: InputParams):
     generator = torch.manual_seed(params.seed)
     prompt_embeds = compel_proc(params.prompt)
     # Can be set to 1~50 steps. LCM support fast inference even <= 4 steps. Recommend: 1~8 steps.
-    num_inference_steps = 8
+    num_inference_steps = 4
    results = pipe(
         prompt_embeds=prompt_embeds,
         generator=generator,
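
The txt2img hunk also shows the prompt passing through compel_proc before it reaches the pipeline, which is why pipe receives prompt_embeds rather than a raw string. A minimal sketch of how such a processor is typically wired up with the compel library follows; the pipe.tokenizer and pipe.text_encoder attribute names are assumptions drawn from standard diffusers pipelines, not lines from this commit:

# Hypothetical setup for compel_proc (assumed, not shown in this commit)
from compel import Compel

# Reuse the pipeline's own tokenizer and text encoder so the resulting
# embeddings match what the UNet expects.
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)

# Weighted-prompt syntax (e.g. "fox++") is resolved into embeddings here.
prompt_embeds = compel_proc("a watercolor fox++ in a misty forest")
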