Files changed (6) hide show
  1. Dockerfile +0 -90
  2. README.md +2 -1
  3. app.py +26 -57
  4. model.py +0 -4
  5. requirements.txt +3 -3
  6. settings.py +0 -1
Dockerfile DELETED
@@ -1,90 +0,0 @@
1
- # Download LFS content while building in order to make this step cacheable
2
- # #===== LFS =====
3
- # FROM alpine/git:2.36.2 AS lfs
4
- # WORKDIR /app
5
- # COPY --link .lfs.hf.co .
6
- # RUN --mount=type=secret,id=SPACE_REPOSITORY,mode=0444,required=true \
7
- # git init \
8
- # && git remote add origin $(cat /run/secrets/SPACE_REPOSITORY) \
9
- # && git add --all \
10
- # && git config user.email "name@mail.com" \
11
- # && git config user.name "Name" \
12
- # && git commit -m "lfs" \
13
- # && git lfs pull \
14
- # && rm -rf .git .gitattributes
15
- # #===============
16
-
17
- FROM nvidia/cuda:11.8.0-runtime-ubuntu18.04
18
- # BEGIN Static part
19
- ENV DEBIAN_FRONTEND=noninteractive \
20
- TZ=Europe/Paris
21
-
22
- RUN apt-get update && apt-get install -y \
23
- git \
24
- make build-essential libssl-dev zlib1g-dev \
25
- libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
26
- libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev git-lfs \
27
- ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx \
28
- && rm -rf /var/lib/apt/lists/* \
29
- && git lfs install
30
-
31
- # User
32
- RUN useradd -m -u 1000 user
33
- USER user
34
- ENV HOME=/home/user \
35
- PATH=/home/user/.local/bin:$PATH
36
- WORKDIR /home/user/app
37
-
38
- # Pyenv
39
- RUN curl https://pyenv.run | bash
40
- ENV PATH=$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH
41
-
42
- ARG PIP_VERSION=22.3.1
43
- ARG PYTHON_VERSION=3.10
44
- # Python
45
- RUN pyenv install $PYTHON_VERSION && \
46
- pyenv global $PYTHON_VERSION && \
47
- pyenv rehash && \
48
- pip install --no-cache-dir --upgrade pip==${PIP_VERSION} setuptools wheel && \
49
- pip install --no-cache-dir \
50
- datasets \
51
- "huggingface-hub>=0.12.1" "protobuf<4" "click<8.1"
52
-
53
- # #^ Waiting for https://github.com/huggingface/huggingface_hub/pull/1345/files to be merged
54
-
55
- USER root
56
- # User Debian packages
57
- # ## Security warning: Potential user code executed as root (build time)
58
- RUN --mount=target=/root/packages.txt,source=packages.txt \
59
- apt-get update && \
60
- xargs -r -a /root/packages.txt apt-get install -y \
61
- && rm -rf /var/lib/apt/lists/*
62
-
63
- USER user
64
-
65
- # Pre requirements (e.g. upgrading pip)
66
- RUN --mount=target=pre-requirements.txt,source=pre-requirements.txt \
67
- pip install --no-cache-dir -r pre-requirements.txt
68
-
69
- # Python packages
70
- RUN pip install --pre torch --index-url https://download.pytorch.org/whl/nightly/cu117
71
- RUN --mount=target=requirements.txt,source=requirements.txt \
72
- pip install --no-cache-dir -r requirements.txt
73
-
74
- ARG SDK=gradio \
75
- SDK_VERSION=3.27.0
76
- RUN pip install --no-cache-dir \
77
- ${SDK}==${SDK_VERSION}
78
-
79
- # App
80
- # COPY --link --chown=1000 --from=lfs /app /home/user/app
81
- COPY --link --chown=1000 ./ /home/user/app
82
- ENV PYTHONPATH=$HOME/app \
83
- PYTHONUNBUFFERED=1 \
84
- GRADIO_ALLOW_FLAGGING=never \
85
- GRADIO_NUM_PORTS=1 \
86
- GRADIO_SERVER_NAME=0.0.0.0 \
87
- GRADIO_THEME=huggingface \
88
- SYSTEM=spaces
89
-
90
- CMD ["python", "app.py"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
README.md CHANGED
@@ -3,7 +3,8 @@ title: IF
3
  emoji: 🔥
4
  colorFrom: pink
5
  colorTo: red
6
- sdk: docker
 
7
  python_version: 3.10.11
8
  app_file: app.py
9
  pinned: false
 
3
  emoji: 🔥
4
  colorFrom: pink
5
  colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 3.27.0
8
  python_version: 3.10.11
9
  app_file: app.py
10
  pinned: false
app.py CHANGED
@@ -6,9 +6,6 @@ import json
6
  import os
7
  import random
8
  import tempfile
9
- import shortuuid
10
- from apscheduler.schedulers.background import BackgroundScheduler
11
- import shutil
12
 
13
  import gradio as gr
14
  import torch
@@ -108,18 +105,14 @@ def upload_stage1_result(stage1_param_path: str, stage1_result_path: str,
108
  if not UPLOAD_REPO_ID:
109
  return
110
  try:
111
- folder_params = "tmp/results/stage1_params"
112
- folder_results = "tmp/results/stage1_results"
113
-
114
- path_params = f"{folder_params}/{save_name}.json"
115
- path_results = f"{folder_results}/{save_name}.pth"
116
-
117
- os.makedirs(folder_params, exist_ok=True)
118
- os.makedirs(folder_results, exist_ok=True)
119
-
120
- shutil.copy(stage1_param_path, path_params)
121
- shutil.copy(stage1_result_path, path_results)
122
-
123
  except Exception as e:
124
  print(e)
125
 
@@ -146,20 +139,17 @@ def upload_stage2_info(stage1_param_file_hash_name: str,
146
  param_file.write(json.dumps(stage2_params))
147
  stage2_param_file_hash_name = get_param_file_hash_name(param_file.name)
148
  save_name = f'{stage1_param_file_hash_name}_{stage2_param_file_hash_name}'
149
-
150
- try:
151
- folder_params = "tmp/results/stage2_params"
152
 
153
- os.makedirs(folder_params, exist_ok=True)
154
- path_params = f"{folder_params}/{save_name}.json"
155
- shutil.copy(param_file.name, path_params)
156
-
 
157
  if UPLOAD_RESULT_IMAGE:
158
- folder_results = "tmp/results/stage2_results"
159
- os.makedirs(folder_results, exist_ok=True)
160
- path_results = f"{folder_results}/{save_name}.png"
161
- shutil.copy(stage2_output_path, path_results)
162
-
163
  except Exception as e:
164
  print(e)
165
 
@@ -196,16 +186,16 @@ def upload_stage2_3_info(stage1_param_file_hash_name: str,
196
  save_name = f'{stage1_param_file_hash_name}_{stage2_3_param_file_hash_name}'
197
 
198
  try:
199
- folder_params = "tmp/results/stage2_3_params"
200
- os.makedirs(folder_params, exist_ok=True)
201
- path_params = f"{folder_params}/{save_name}.json"
202
- shutil.copy(param_file.name, path_params)
203
-
204
  if UPLOAD_RESULT_IMAGE:
205
- folder_results = "tmp/results/stage2_3_results"
206
- os.makedirs(folder_results, exist_ok=True)
207
- path_results = f"{folder_results}/{save_name}.png"
208
- shutil.copy(stage2_3_output_path, path_results)
 
209
  except Exception as e:
210
  print(e)
211
 
@@ -228,22 +218,6 @@ def show_gallery_view() -> tuple[dict, dict]:
228
  def show_upscaled_view() -> tuple[dict, dict]:
229
  return _update_result_view(False)
230
 
231
- def upload_files():
232
- """Zips files and uploads to dataset. Local data is deleted
233
- """
234
- if os.path.exists("tmp/results") and os.path.isdir("tmp/results"):
235
- try:
236
- random_folder = random.randint(0,1000)
237
- shutil.make_archive("tmp/results", 'zip', "tmp/results")
238
- hf_api.upload_file(
239
- path_or_fileobj="tmp/results.zip",
240
- path_in_repo=f"{random_folder}/results_{shortuuid.uuid()}.zip",
241
- repo_id=UPLOAD_REPO_ID,
242
- repo_type="dataset",
243
- )
244
- shutil.rmtree("tmp/results")
245
- except Exception as e:
246
- print(e)
247
 
248
  examples = [
249
  'high quality dslr photo, a photo product of a lemon inspired by natural and organic materials, wooden accents, intricately decorated with glowing vines of led lights, inspired by baroque luxury',
@@ -692,10 +666,5 @@ with gr.Blocks(css='style.css') as demo:
692
  ],
693
  queue=False,
694
  )
695
-
696
- if UPLOAD_REPO_ID:
697
- scheduler = BackgroundScheduler()
698
- scheduler.add_job(func=upload_files, trigger="interval", seconds=60*20)
699
- scheduler.start()
700
 
701
  demo.queue(api_open=False, max_size=MAX_QUEUE_SIZE).launch(debug=DEBUG)
 
6
  import os
7
  import random
8
  import tempfile
 
 
 
9
 
10
  import gradio as gr
11
  import torch
 
105
  if not UPLOAD_REPO_ID:
106
  return
107
  try:
108
+ hf_api.upload_file(path_or_fileobj=stage1_param_path,
109
+ path_in_repo=f'stage1_params/{save_name}.json',
110
+ repo_id=UPLOAD_REPO_ID,
111
+ repo_type='dataset')
112
+ hf_api.upload_file(path_or_fileobj=stage1_result_path,
113
+ path_in_repo=f'stage1_results/{save_name}.pth',
114
+ repo_id=UPLOAD_REPO_ID,
115
+ repo_type='dataset')
 
 
 
 
116
  except Exception as e:
117
  print(e)
118
 
 
139
  param_file.write(json.dumps(stage2_params))
140
  stage2_param_file_hash_name = get_param_file_hash_name(param_file.name)
141
  save_name = f'{stage1_param_file_hash_name}_{stage2_param_file_hash_name}'
 
 
 
142
 
143
+ try:
144
+ hf_api.upload_file(path_or_fileobj=param_file.name,
145
+ path_in_repo=f'stage2_params/{save_name}.json',
146
+ repo_id=UPLOAD_REPO_ID,
147
+ repo_type='dataset')
148
  if UPLOAD_RESULT_IMAGE:
149
+ hf_api.upload_file(path_or_fileobj=stage2_output_path,
150
+ path_in_repo=f'stage2_results/{save_name}.png',
151
+ repo_id=UPLOAD_REPO_ID,
152
+ repo_type='dataset')
 
153
  except Exception as e:
154
  print(e)
155
 
 
186
  save_name = f'{stage1_param_file_hash_name}_{stage2_3_param_file_hash_name}'
187
 
188
  try:
189
+ hf_api.upload_file(path_or_fileobj=param_file.name,
190
+ path_in_repo=f'stage2_3_params/{save_name}.json',
191
+ repo_id=UPLOAD_REPO_ID,
192
+ repo_type='dataset')
 
193
  if UPLOAD_RESULT_IMAGE:
194
+ hf_api.upload_file(
195
+ path_or_fileobj=stage2_3_output_path,
196
+ path_in_repo=f'stage2_3_results/{save_name}.png',
197
+ repo_id=UPLOAD_REPO_ID,
198
+ repo_type='dataset')
199
  except Exception as e:
200
  print(e)
201
 
 
218
  def show_upscaled_view() -> tuple[dict, dict]:
219
  return _update_result_view(False)
220
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
 
222
  examples = [
223
  'high quality dslr photo, a photo product of a lemon inspired by natural and organic materials, wooden accents, intricately decorated with glowing vines of led lights, inspired by baroque luxury',
 
666
  ],
667
  queue=False,
668
  )
 
 
 
 
 
669
 
670
  demo.queue(api_open=False, max_size=MAX_QUEUE_SIZE).launch(debug=DEBUG)
model.py CHANGED
@@ -59,10 +59,6 @@ class Model:
59
  if DISABLE_AUTOMATIC_CPU_OFFLOAD:
60
  self.pipe.to(self.device)
61
  self.super_res_1_pipe.to(self.device)
62
-
63
- self.pipe.unet.to(memory_format=torch.channels_last)
64
- self.pipe.unet = torch.compile(self.pipe.unet, mode="reduce-overhead", fullgraph=True)
65
-
66
  if not DISABLE_SD_X4_UPSCALER:
67
  self.super_res_2_pipe.to(self.device)
68
  else:
 
59
  if DISABLE_AUTOMATIC_CPU_OFFLOAD:
60
  self.pipe.to(self.device)
61
  self.super_res_1_pipe.to(self.device)
 
 
 
 
62
  if not DISABLE_SD_X4_UPSCALER:
63
  self.super_res_2_pipe.to(self.device)
64
  else:
requirements.txt CHANGED
@@ -1,7 +1,7 @@
1
  accelerate==0.18.0
2
  beautifulsoup4==4.12.2
3
  bitsandbytes==0.38.1
4
- git+https://github.com/huggingface/diffusers
5
  ftfy==6.1.1
6
  gradio==3.27.0
7
  huggingface_hub==0.14.1
@@ -10,7 +10,7 @@ Pillow==9.5.0
10
  safetensors==0.3.0
11
  sentencepiece==0.1.98
12
  tokenizers==0.13.3
 
 
13
  tqdm==4.65.0
14
  transformers==4.28.1
15
- shortuuid==1.0.11
16
- apscheduler==3.10.1
 
1
  accelerate==0.18.0
2
  beautifulsoup4==4.12.2
3
  bitsandbytes==0.38.1
4
+ diffusers==0.16.0
5
  ftfy==6.1.1
6
  gradio==3.27.0
7
  huggingface_hub==0.14.1
 
10
  safetensors==0.3.0
11
  sentencepiece==0.1.98
12
  tokenizers==0.13.3
13
+ torch==2.0.0
14
+ torchvision==0.15.1
15
  tqdm==4.65.0
16
  transformers==4.28.1
 
 
settings.py CHANGED
@@ -51,7 +51,6 @@ if os.getenv('IS_PUBLIC_DEMO') == '1':
51
  SHOW_NUM_IMAGES = False
52
  # parameters
53
  DEFAULT_CUSTOM_TIMESTEPS_1 = 'smart50'
54
- UPLOAD_RESULT_IMAGE = True
55
  # model
56
  DISABLE_AUTOMATIC_CPU_OFFLOAD = True
57
  RUN_GARBAGE_COLLECTION = False
 
51
  SHOW_NUM_IMAGES = False
52
  # parameters
53
  DEFAULT_CUSTOM_TIMESTEPS_1 = 'smart50'
 
54
  # model
55
  DISABLE_AUTOMATIC_CPU_OFFLOAD = True
56
  RUN_GARBAGE_COLLECTION = False